diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll
deleted file mode 100644
index b91d86befbc87c42faca6e2102435c57551f3c9d..0000000000000000000000000000000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll
+++ /dev/null
@@ -1,1356 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -mattr=+zfh \
-; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2,
ta, mu -; CHECK-NEXT: vfadd.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv8f16.nxv8f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfadd.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv16f16.nxv16f16( - , - , - i64); - -define @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfadd.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv16f16.nxv16f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfadd.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv32f16.nxv32f16( - , - , - i64); - -define @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfadd.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv32f16.nxv32f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv1f32.nxv1f32( - , - , - i64); - -define @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfadd.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv1f32.nxv1f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv2f32.nxv2f32( - , - , - i64); 
- -define @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfadd.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv2f32.nxv2f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv4f32.nxv4f32( - , - , - i64); - -define @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfadd.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv4f32.nxv4f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfadd.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv8f32.nxv8f32( - , - , - i64); - -define @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfadd.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv8f32.nxv8f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfadd.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv16f32.nxv16f32( - , - , - i64); - -define @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfadd.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv16f32.nxv16f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( - , - , - i64); - -define @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfadd.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv2f64.nxv2f64( - , - , - i64); - -define @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfadd.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv2f64.nxv2f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfadd.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv4f64.nxv4f64( - , - , - i64); - -define @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfadd.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv4f64.nxv4f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfadd.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv8f64.nxv8f64( - , - , - i64); - -define @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfadd.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv8f64.nxv8f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv1f16.f16( - , - half, - i64); - -define @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv1f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv1f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv2f16.f16( - , - half, - i64); - -define @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv2f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv2f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv4f16.f16( - , - half, - i64); - -define @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv4f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv4f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv8f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare 
@llvm.riscv.vfadd.mask.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv16f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv32f16.f16( - , - half, - i64); - -define @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv32f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv32f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv1f32.f32( - , - float, - i64); - -define @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; 
CHECK-NEXT: vfadd.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv4f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv4f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv8f32.f32( - , - float, - i64); - -define @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv8f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv8f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv16f32.f32( - , - float, - i64); - -define @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv16f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv16f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv1f64.f64( - , - double, - i64); - -define @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64( %0, double 
%1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv1f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv1f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv2f64.f64( - , - double, - i64); - -define @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv2f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv2f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv2f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv4f64.f64( - , - double, - i64); - -define @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv4f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv4f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv4f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfadd.nxv8f64.f64( - , - double, - i64); - -define @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.nxv8f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfadd.mask.nxv8f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfadd.mask.nxv8f64.f64( - %0, - %1, - 
double %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfadd.ll
index 5df1881bffb231d2ae1ae17e2efb688806310f2c..041580b2b49d8abee295f7019d27e892aac9cc44 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -116,10 +118,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
-define @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfadd.nxv8f16.nxv8f16( , , - i32); + iXLen); -define @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv8f16.nxv8f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfadd.nxv16f16.nxv16f16( , , - i32); + iXLen); -define @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv16f16.nxv16f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -229,9 +231,9 @@ entry: declare @llvm.riscv.vfadd.nxv32f16.nxv32f16( , , - i32); + iXLen); -define @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv32f16.nxv32f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -251,10 +253,10 @@ declare @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -267,7 +269,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -275,9 +277,9 @@ entry: declare 
@llvm.riscv.vfadd.nxv1f32.nxv1f32( , , - i32); + iXLen); -define @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -287,7 +289,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv1f32.nxv1f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -297,10 +299,10 @@ declare @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -312,7 +314,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -320,9 +322,9 @@ entry: declare @llvm.riscv.vfadd.nxv2f32.nxv2f32( , , - i32); + iXLen); -define @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -332,7 +334,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv2f32.nxv2f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -342,10 +344,10 @@ declare @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -357,7 +359,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -365,9 +367,9 @@ entry: declare @llvm.riscv.vfadd.nxv4f32.nxv4f32( , , - i32); + iXLen); -define @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -377,7 +379,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv4f32.nxv4f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -387,10 +389,10 @@ declare @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -402,7 +404,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -410,9 +412,9 @@ entry: declare @llvm.riscv.vfadd.nxv8f32.nxv8f32( , , - i32); + iXLen); -define @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -422,7 +424,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv8f32.nxv8f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -432,10 +434,10 @@ 
declare @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -447,7 +449,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -455,9 +457,9 @@ entry: declare @llvm.riscv.vfadd.nxv16f32.nxv16f32( , , - i32); + iXLen); -define @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -467,7 +469,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv16f32.nxv16f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -477,10 +479,10 @@ declare @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -493,7 +495,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -501,9 +503,9 @@ entry: declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( , , - i32); + iXLen); -define @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -513,7 +515,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -523,10 +525,10 @@ declare @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -538,7 +540,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -546,9 +548,9 @@ entry: declare @llvm.riscv.vfadd.nxv2f64.nxv2f64( , , - i32); + iXLen); -define @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -558,7 +560,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv2f64.nxv2f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -568,10 +570,10 @@ declare @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -583,7 +585,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen 
%4, iXLen 1) ret %a } @@ -591,9 +593,9 @@ entry: declare @llvm.riscv.vfadd.nxv4f64.nxv4f64( , , - i32); + iXLen); -define @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -603,7 +605,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv4f64.nxv4f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -613,10 +615,10 @@ declare @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -628,7 +630,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -636,9 +638,9 @@ entry: declare @llvm.riscv.vfadd.nxv8f64.nxv8f64( , , - i32); + iXLen); -define @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -648,7 +650,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv8f64.nxv8f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -658,10 +660,10 @@ declare @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -674,7 +676,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -682,9 +684,9 @@ entry: declare @llvm.riscv.vfadd.nxv1f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -694,7 +696,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv1f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -704,10 +706,10 @@ declare @llvm.riscv.vfadd.mask.nxv1f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -719,7 +721,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -727,9 +729,9 @@ entry: declare @llvm.riscv.vfadd.nxv2f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -739,7 +741,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv2f16.f16( %0, half %1, - i32 
%2) + iXLen %2) ret %a } @@ -749,10 +751,10 @@ declare @llvm.riscv.vfadd.mask.nxv2f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -764,7 +766,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -772,9 +774,9 @@ entry: declare @llvm.riscv.vfadd.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -784,7 +786,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -794,10 +796,10 @@ declare @llvm.riscv.vfadd.mask.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -809,7 +811,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -817,9 +819,9 @@ entry: declare @llvm.riscv.vfadd.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -829,7 +831,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -839,10 +841,10 @@ declare @llvm.riscv.vfadd.mask.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -854,7 +856,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -862,9 +864,9 @@ entry: declare @llvm.riscv.vfadd.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -874,7 +876,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -884,10 +886,10 @@ declare @llvm.riscv.vfadd.mask.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ 
-899,7 +901,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -907,9 +909,9 @@ entry: declare @llvm.riscv.vfadd.nxv32f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -919,7 +921,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv32f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -929,10 +931,10 @@ declare @llvm.riscv.vfadd.mask.nxv32f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -944,7 +946,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -952,9 +954,9 @@ entry: declare @llvm.riscv.vfadd.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -964,7 +966,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -974,10 +976,10 @@ declare @llvm.riscv.vfadd.mask.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -989,7 +991,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -997,9 +999,9 @@ entry: declare @llvm.riscv.vfadd.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1009,7 +1011,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1019,10 +1021,10 @@ declare @llvm.riscv.vfadd.mask.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1034,7 +1036,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1042,9 +1044,9 @@ entry: declare @llvm.riscv.vfadd.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1054,7 +1056,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1064,10 +1066,10 @@ declare @llvm.riscv.vfadd.mask.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1079,7 +1081,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1087,9 +1089,9 @@ entry: declare @llvm.riscv.vfadd.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1099,7 +1101,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1109,10 +1111,10 @@ declare @llvm.riscv.vfadd.mask.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1124,7 +1126,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1132,9 +1134,9 @@ entry: declare @llvm.riscv.vfadd.nxv16f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1144,7 +1146,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv16f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1154,10 +1156,10 @@ declare @llvm.riscv.vfadd.mask.nxv16f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1169,7 +1171,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1177,9 +1179,9 @@ entry: declare @llvm.riscv.vfadd.nxv1f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1189,7 +1191,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv1f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1199,10 +1201,10 @@ declare @llvm.riscv.vfadd.mask.nxv1f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define 
@intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1214,7 +1216,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vfadd.nxv2f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv2f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1244,10 +1246,10 @@ declare @llvm.riscv.vfadd.mask.nxv2f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1259,7 +1261,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1267,9 +1269,9 @@ entry: declare @llvm.riscv.vfadd.nxv4f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1279,7 +1281,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv4f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1289,10 +1291,10 @@ declare @llvm.riscv.vfadd.mask.nxv4f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1304,7 +1306,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1312,9 +1314,9 @@ entry: declare @llvm.riscv.vfadd.nxv8f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1324,7 +1326,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv8f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1334,10 +1336,10 @@ declare @llvm.riscv.vfadd.mask.nxv8f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1349,7 +1351,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll 
b/llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll deleted file mode 100644 index c86e1f334712a69d488359a849628469df3d0ef3..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll +++ /dev/null @@ -1,692 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfclass.nxv1i16( - , - i64); - -define @intrinsic_vfclass_v_nxv1i16_nxv1f16( -; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv1i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv1i16( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfclass.v v8, v9, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv1i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv2i16( - , - i64); - -define @intrinsic_vfclass_v_nxv2i16_nxv2f16( -; CHECK-LABEL: intrinsic_vfclass_v_nxv2i16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv2i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv2i16( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv2i16_nxv2f16( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfclass.v v8, v9, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv2i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv4i16( - , - i64); - -define @intrinsic_vfclass_v_nxv4i16_nxv4f16( -; CHECK-LABEL: intrinsic_vfclass_v_nxv4i16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv4i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv4i16( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv4i16_nxv4f16( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfclass.v v8, v9, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv4i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv8i16( - , - i64); - -define @intrinsic_vfclass_v_nxv8i16_nxv8f16( -; CHECK-LABEL: intrinsic_vfclass_v_nxv8i16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv8i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv8i16( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv8i16_nxv8f16( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, 
mu -; CHECK-NEXT: vfclass.v v8, v10, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv8i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv16i16( - , - i64); - -define @intrinsic_vfclass_v_nxv16i16_nxv16f16( -; CHECK-LABEL: intrinsic_vfclass_v_nxv16i16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv16i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv16i16( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv16i16_nxv16f16( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfclass.v v8, v12, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv16i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv32i16( - , - i64); - -define @intrinsic_vfclass_v_nxv32i16_nxv32f16( -; CHECK-LABEL: intrinsic_vfclass_v_nxv32i16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv32i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv32i16( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv32i16_nxv32f16( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfclass.v v8, v16, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv32i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv1i32( - , - i64); - -define @intrinsic_vfclass_v_nxv1i32_nxv1f32( -; CHECK-LABEL: intrinsic_vfclass_v_nxv1i32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv1i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv1i32( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv1i32_nxv1f32( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfclass.v v8, v9, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv1i32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv2i32( - , - i64); - -define @intrinsic_vfclass_v_nxv2i32_nxv2f32( -; CHECK-LABEL: intrinsic_vfclass_v_nxv2i32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv2i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv2i32( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv2i32_nxv2f32( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfclass.v v8, v9, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv2i32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - 
-declare @llvm.riscv.vfclass.nxv4i32( - , - i64); - -define @intrinsic_vfclass_v_nxv4i32_nxv4f32( -; CHECK-LABEL: intrinsic_vfclass_v_nxv4i32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv4i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv4i32( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv4i32_nxv4f32( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfclass.v v8, v10, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv4i32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv8i32( - , - i64); - -define @intrinsic_vfclass_v_nxv8i32_nxv8f32( -; CHECK-LABEL: intrinsic_vfclass_v_nxv8i32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv8i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv8i32( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv8i32_nxv8f32( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfclass.v v8, v12, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv8i32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv16i32( - , - i64); - -define @intrinsic_vfclass_v_nxv16i32_nxv16f32( -; CHECK-LABEL: intrinsic_vfclass_v_nxv16i32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv16i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv16i32( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv16i32_nxv16f32( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfclass.v v8, v16, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv16i32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv1i64( - , - i64); - -define @intrinsic_vfclass_v_nxv1i64_nxv1f64( -; CHECK-LABEL: intrinsic_vfclass_v_nxv1i64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv1i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv1i64( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv1i64_nxv1f64( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfclass.v v8, v9, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv1i64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv2i64( - , - i64); - -define @intrinsic_vfclass_v_nxv2i64_nxv2f64( -; CHECK-LABEL: intrinsic_vfclass_v_nxv2i64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e64, m2, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv2i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv2i64( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv2i64_nxv2f64( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfclass.v v8, v10, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv2i64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv4i64( - , - i64); - -define @intrinsic_vfclass_v_nxv4i64_nxv4f64( -; CHECK-LABEL: intrinsic_vfclass_v_nxv4i64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv4i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv4i64( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv4i64_nxv4f64( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfclass.v v8, v12, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv4i64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv8i64( - , - i64); - -define @intrinsic_vfclass_v_nxv8i64_nxv8f64( -; CHECK-LABEL: intrinsic_vfclass_v_nxv8i64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv8i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv8i64( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv8i64_nxv8f64( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu -; CHECK-NEXT: vfclass.v v8, v16, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv8i64( - %0, - %1, - %2, - i64 %3) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass.ll similarity index 91% rename from llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfclass.ll index ae9df2aefa4d91fe90b8a306018f46482c9e624f..e6aa3983109567fd94010ebb6c5c65ad45881bc9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfclass.ll @@ -1,9 +1,11 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfclass.nxv1i16( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv1i16_nxv1f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16: @@ -12,11 +14,11 @@ define @intrinsic_vfclass_v_nxv1i16_nxv1f16( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv1i16( %0, - i32 %1) + iXLen %1) ret 
%a } @@ -25,7 +27,7 @@ declare @llvm.riscv.vfclass.mask.nxv1i16( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16: @@ -36,20 +38,20 @@ define @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv1i16( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv2i16( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv2i16_nxv2f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv2i16_nxv2f16: @@ -58,11 +60,11 @@ define @intrinsic_vfclass_v_nxv2i16_nxv2f16( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv2i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -71,7 +73,7 @@ declare @llvm.riscv.vfclass.mask.nxv2i16( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv2i16_nxv2f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2f16: @@ -82,20 +84,20 @@ define @intrinsic_vfclass_mask_v_nxv2i16_nxv2f16( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv2i16( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv4i16( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv4i16_nxv4f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv4i16_nxv4f16: @@ -104,11 +106,11 @@ define @intrinsic_vfclass_v_nxv4i16_nxv4f16( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv4i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -117,7 +119,7 @@ declare @llvm.riscv.vfclass.mask.nxv4i16( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv4i16_nxv4f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i16_nxv4f16: @@ -128,20 +130,20 @@ define @intrinsic_vfclass_mask_v_nxv4i16_nxv4f16( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv4i16( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv8i16( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv8i16_nxv8f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv8i16_nxv8f16: @@ -150,11 +152,11 @@ define @intrinsic_vfclass_v_nxv8i16_nxv8f16( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv8i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -163,7 +165,7 @@ declare @llvm.riscv.vfclass.mask.nxv8i16( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv8i16_nxv8f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8f16: @@ -174,20 +176,20 @@ define @intrinsic_vfclass_mask_v_nxv8i16_nxv8f16( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv8i16( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv16i16( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv16i16_nxv16f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv16i16_nxv16f16: @@ -196,11 +198,11 @@ define @intrinsic_vfclass_v_nxv16i16_nxv16f16( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv16i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -209,7 +211,7 @@ declare @llvm.riscv.vfclass.mask.nxv16i16( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv16i16_nxv16f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16f16: @@ -220,20 +222,20 @@ define @intrinsic_vfclass_mask_v_nxv16i16_nxv16f16( %0, %1, %2, - i32 %3) 
nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv16i16( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv32i16( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv32i16_nxv32f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv32i16_nxv32f16: @@ -242,11 +244,11 @@ define @intrinsic_vfclass_v_nxv32i16_nxv32f16( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv32i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -255,7 +257,7 @@ declare @llvm.riscv.vfclass.mask.nxv32i16( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv32i16_nxv32f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32f16: @@ -266,20 +268,20 @@ define @intrinsic_vfclass_mask_v_nxv32i16_nxv32f16( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv32i16( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv1i32( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv1i32_nxv1f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv1i32_nxv1f32: @@ -288,11 +290,11 @@ define @intrinsic_vfclass_v_nxv1i32_nxv1f32( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv1i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -301,7 +303,7 @@ declare @llvm.riscv.vfclass.mask.nxv1i32( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv1i32_nxv1f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i32_nxv1f32: @@ -312,20 +314,20 @@ define @intrinsic_vfclass_mask_v_nxv1i32_nxv1f32( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv1i32( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv2i32( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv2i32_nxv2f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv2i32_nxv2f32: @@ -334,11 +336,11 @@ define @intrinsic_vfclass_v_nxv2i32_nxv2f32( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv2i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -347,7 +349,7 @@ declare @llvm.riscv.vfclass.mask.nxv2i32( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv2i32_nxv2f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i32_nxv2f32: @@ -358,20 +360,20 @@ define @intrinsic_vfclass_mask_v_nxv2i32_nxv2f32( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv2i32( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv4i32( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv4i32_nxv4f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv4i32_nxv4f32: @@ -380,11 +382,11 @@ define @intrinsic_vfclass_v_nxv4i32_nxv4f32( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv4i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -393,7 +395,7 @@ declare @llvm.riscv.vfclass.mask.nxv4i32( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv4i32_nxv4f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i32_nxv4f32: @@ -404,20 +406,20 @@ define @intrinsic_vfclass_mask_v_nxv4i32_nxv4f32( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv4i32( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv8i32( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv8i32_nxv8f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv8i32_nxv8f32: 
@@ -426,11 +428,11 @@ define @intrinsic_vfclass_v_nxv8i32_nxv8f32( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv8i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -439,7 +441,7 @@ declare @llvm.riscv.vfclass.mask.nxv8i32( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv8i32_nxv8f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i32_nxv8f32: @@ -450,20 +452,20 @@ define @intrinsic_vfclass_mask_v_nxv8i32_nxv8f32( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv8i32( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv16i32( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv16i32_nxv16f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv16i32_nxv16f32: @@ -472,11 +474,11 @@ define @intrinsic_vfclass_v_nxv16i32_nxv16f32( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv16i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -485,7 +487,7 @@ declare @llvm.riscv.vfclass.mask.nxv16i32( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv16i32_nxv16f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i32_nxv16f32: @@ -496,20 +498,20 @@ define @intrinsic_vfclass_mask_v_nxv16i32_nxv16f32( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv16i32( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv1i64( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv1i64_nxv1f64( ; CHECK-LABEL: intrinsic_vfclass_v_nxv1i64_nxv1f64: @@ -518,11 +520,11 @@ define @intrinsic_vfclass_v_nxv1i64_nxv1f64( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv1i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -531,7 +533,7 @@ declare @llvm.riscv.vfclass.mask.nxv1i64( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv1i64_nxv1f64( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i64_nxv1f64: @@ -542,20 +544,20 @@ define @intrinsic_vfclass_mask_v_nxv1i64_nxv1f64( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv1i64( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv2i64( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv2i64_nxv2f64( ; CHECK-LABEL: intrinsic_vfclass_v_nxv2i64_nxv2f64: @@ -564,11 +566,11 @@ define @intrinsic_vfclass_v_nxv2i64_nxv2f64( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv2i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -577,7 +579,7 @@ declare @llvm.riscv.vfclass.mask.nxv2i64( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv2i64_nxv2f64( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i64_nxv2f64: @@ -588,20 +590,20 @@ define @intrinsic_vfclass_mask_v_nxv2i64_nxv2f64( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv2i64( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv4i64( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv4i64_nxv4f64( ; CHECK-LABEL: intrinsic_vfclass_v_nxv4i64_nxv4f64: @@ -610,11 +612,11 @@ define @intrinsic_vfclass_v_nxv4i64_nxv4f64( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv4i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -623,7 +625,7 @@ declare 
@llvm.riscv.vfclass.mask.nxv4i64( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv4i64_nxv4f64( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i64_nxv4f64: @@ -634,20 +636,20 @@ define @intrinsic_vfclass_mask_v_nxv4i64_nxv4f64( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv4i64( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv8i64( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv8i64_nxv8f64( ; CHECK-LABEL: intrinsic_vfclass_v_nxv8i64_nxv8f64: @@ -656,11 +658,11 @@ define @intrinsic_vfclass_v_nxv8i64_nxv8f64( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv8i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -669,7 +671,7 @@ declare @llvm.riscv.vfclass.mask.nxv8i64( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv8i64_nxv8f64( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i64_nxv8f64: @@ -680,13 +682,13 @@ define @intrinsic_vfclass_mask_v_nxv8i64_nxv8f64( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv8i64( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll deleted file mode 100644 index 65270dc06336de53086e4b1a5f670116ab3543c4..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll +++ /dev/null @@ -1,617 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16( - , - i64); - -define 
@intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare 
@llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32( - %0, - %1, - %2, - i64 %3, i64 1) 
- - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll index 5549960bb7736273188ae28cfe791764702c6fcf..e8d6257f1e65282d0cf5cf5c31941511810730d8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -14,7 +16,7 @@ define @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16( @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -23,10 +25,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -37,16 +39,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -55,7 +57,7 @@ define @intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16( @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -64,10 +66,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16( %0, %1, %2, i32 %3) nounwind { 
+define @intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -78,16 +80,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -96,7 +98,7 @@ define @intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16( @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -105,10 +107,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -119,16 +121,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -137,7 +139,7 @@ define @intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16( @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -146,10 +148,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -160,16 +162,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -178,7 +180,7 @@ define @intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16( @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -187,10 +189,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -201,16 +203,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -219,7 +221,7 @@ define @intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16( @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -228,10 +230,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -242,16 +244,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -260,7 +262,7 @@ define @intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32( @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -269,10 +271,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -283,16 +285,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -301,7 +303,7 @@ define @intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32( @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -324,16 +326,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -342,7 +344,7 @@ define @intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32( @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -351,10 +353,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -365,16 +367,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -383,7 +385,7 @@ define @intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32( @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -392,10 +394,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -406,16 +408,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -424,7 +426,7 @@ define @intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32( @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -433,10 +435,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -447,16 +449,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -465,7 +467,7 @@ define @intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64( @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -474,10 +476,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -488,16 +490,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ 
-506,7 +508,7 @@ define @intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64( @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -515,10 +517,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -529,16 +531,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -547,7 +549,7 @@ define @intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64( @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -556,10 +558,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -570,16 +572,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -588,7 +590,7 @@ define @intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64( @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -597,10 +599,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -611,7 +613,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll deleted file mode 100644 index 6fc87d15dac18bd6a6ce878fbec59a6bb9c9f02d..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll +++ /dev/null @@ -1,617 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16( - %0, - i64 %1) - - ret %a -} - -declare 
@llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a0, e32, m2, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64( %0, i64 %1) nounwind { -; CHECK-LABEL: 
intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll index 1c8c2a80c90db523b5367e3653814a356e67c84f..82ec8ca7cc74d560946fb8441f4da6be1c794849 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16( , - i32); + iXLen); -define 
@intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -14,7 +16,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16( @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -23,10 +25,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -37,16 +39,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -55,7 +57,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16( @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -64,10 +66,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -78,16 +80,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -96,7 +98,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16( @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -105,10 +107,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -119,16 +121,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -137,7 +139,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16( @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -146,10 +148,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16( , , , - i32, - i32); + iXLen, + iXLen); -define 
@intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -160,16 +162,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -178,7 +180,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16( @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -187,10 +189,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -201,16 +203,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -219,7 +221,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16( @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -228,10 +230,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -242,16 +244,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -260,7 +262,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32( @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -269,10 +271,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -283,16 +285,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32( , - i32); + iXLen); -define 
@intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -301,7 +303,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32( @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -324,16 +326,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -342,7 +344,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32( @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -351,10 +353,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -365,16 +367,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -383,7 +385,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32( @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -392,10 +394,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -406,16 +408,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -424,7 +426,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32( @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -433,10 +435,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32( , , , - i32, - i32); + iXLen, + iXLen); 
-define @intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -447,16 +449,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -465,7 +467,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64( @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -474,10 +476,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -488,16 +490,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -506,7 +508,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64( @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -515,10 +517,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -529,16 +531,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -547,7 +549,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64( @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -556,10 +558,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -570,16 +572,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64( %0, i32 %1) nounwind { 
+define @intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -588,7 +590,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64( @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -597,10 +599,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -611,7 +613,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll deleted file mode 100644 index 75c0a7ff62a47152410c820e4dd55ae292cd1c46..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll +++ /dev/null @@ -1,617 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32( - , - i32); - -define 
@intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - 
%a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64( %0, %1, %2, i32 %3) nounwind { -; 
CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll rename to llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll index 9c3e2a03f1141fdfa6c929bcf6d9998fa294dfa6..0dbc0f221e7e60d467a677fa62426f0766232b3e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -14,7 +16,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16( @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -23,10 +25,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -37,16 +39,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, 
ta, mu @@ -55,7 +57,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16( @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -64,10 +66,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -78,16 +80,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -96,7 +98,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16( @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -105,10 +107,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -119,16 +121,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -137,7 +139,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16( @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -146,10 +148,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -160,16 +162,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -178,7 +180,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16( @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -187,10 +189,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16( %0, %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -201,16 +203,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -219,7 +221,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16( @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -228,10 +230,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -242,16 +244,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -260,7 +262,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32( @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -269,10 +271,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -283,16 +285,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -301,7 +303,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32( @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -324,16 +326,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32( %0, i64 %1) nounwind { +define 
@intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -342,7 +344,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32( @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -351,10 +353,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -365,16 +367,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -383,7 +385,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32( @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -392,10 +394,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -406,16 +408,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -424,7 +426,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32( @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -433,10 +435,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -447,16 +449,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -465,7 +467,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64( @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64( %0, - i64 %1) + iXLen %1) ret %a } @@ -474,10 +476,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64( , 
, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -488,16 +490,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -506,7 +508,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64( @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64( %0, - i64 %1) + iXLen %1) ret %a } @@ -515,10 +517,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -529,16 +531,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -547,7 +549,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64( @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64( %0, - i64 %1) + iXLen %1) ret %a } @@ -556,10 +558,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -570,16 +572,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -588,7 +590,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64( @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64( %0, - i64 %1) + iXLen %1) ret %a } @@ -597,10 +599,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -611,7 +613,7 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll deleted file mode 100644 index 3a309eea35dbd4c61587f6fe864fe6be4f267f45..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll +++ /dev/null @@ -1,617 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32( - %0, - %1, - %2, 
- i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll index 966a5d6f85a0f3a06467c454cbdeb0802cdc3e3c..457a93587ec2b6e4ffbf4eb84c8b8fdec283cfad 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -14,7 +16,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -23,10 +25,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -37,16 +39,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -55,7 +57,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -64,10 +66,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -78,16 +80,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + 
iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -96,7 +98,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -105,10 +107,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -119,16 +121,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -137,7 +139,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -146,10 +148,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -160,16 +162,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -178,7 +180,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -187,10 +189,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -201,16 +203,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -219,7 
+221,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -228,10 +230,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -242,16 +244,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -260,7 +262,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -269,10 +271,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -283,16 +285,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -301,7 +303,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -324,16 +326,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -342,7 +344,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -351,10 +353,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32( 
%0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -365,16 +367,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -383,7 +385,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -392,10 +394,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -406,16 +408,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -424,7 +426,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -433,10 +435,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -447,16 +449,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -465,7 +467,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -474,10 +476,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -488,16 +490,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64( %0, i32 %1) 
nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -506,7 +508,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -515,10 +517,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -529,16 +531,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -547,7 +549,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -556,10 +558,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -570,16 +572,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -588,7 +590,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -597,10 +599,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -611,7 +613,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll deleted file mode 100644 index f5984a512e0feeb74858ab63f2a84dae11319c36..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll +++ /dev/null @@ -1,617 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16( - , - i64); - -define 
@intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare 
@llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32( - 
%0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll index 26632717dfa9f06af1f54ec281f305d11ad8b8c2..88205bb75ce3d3a3a04658cefa5453cd71730fa3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 
's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16( , - i32); + iXLen); -define @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -14,7 +16,7 @@ define @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16( @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -23,10 +25,10 @@ declare @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -37,16 +39,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16( , - i32); + iXLen); -define @intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -55,7 +57,7 @@ define @intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16( @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -64,10 +66,10 @@ declare @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -78,16 +80,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16( , - i32); + iXLen); -define @intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -96,7 +98,7 @@ define @intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16( @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -105,10 +107,10 @@ declare @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -119,16 +121,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16( , - i32); + iXLen); -define @intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -137,7 +139,7 @@ define @intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16( @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16( %0, - i32 %1) 
+ iXLen %1) ret %a } @@ -146,10 +148,10 @@ declare @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -160,16 +162,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -178,7 +180,7 @@ define @intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16( @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -187,10 +189,10 @@ declare @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -201,16 +203,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16( , - i32); + iXLen); -define @intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -219,7 +221,7 @@ define @intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16( @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -228,10 +230,10 @@ declare @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -242,16 +244,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -260,7 +262,7 @@ define @intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32( @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -269,10 +271,10 @@ declare @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -283,16 +285,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } 
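
For reference, every rename in this patch follows the same consolidation pattern: the separate rv32 and rv64 copies of a test are collapsed into one file written against the placeholder type iXLen, and each RUN line pipes the file through sed to rewrite iXLen to the target's native XLEN type (i32 or i64) before handing it to llc. A minimal sketch of the resulting file shape, built from the vfcvt.x.f.v nxv1i16 variant in this diff; note the <vscale x ...> type spellings are reconstructed for illustration (the angle-bracketed types were stripped from the hunks above) and the RUN lines are copied from the renamed file:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

; iXLen is not a real IR type; sed substitutes i32 or i64 before llc parses the file.
declare <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
entry:
  ; The vector-length operand (%1) is passed as the target's XLEN-sized integer.
  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
    <vscale x 1 x half> %0,
    iXLen %1)
  ret <vscale x 1 x i16> %a
}

A single CHECK prefix can serve both RUN lines only because the checked asm (the vsetvli/vfcvt sequences) is identical on riscv32 and riscv64, which the deleted rv64 copies above confirm; that is what makes deleting the -rv64.ll files safe.
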
declare @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -301,7 +303,7 @@ define @intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32( @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -324,16 +326,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -342,7 +344,7 @@ define @intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32( @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -351,10 +353,10 @@ declare @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -365,16 +367,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -383,7 +385,7 @@ define @intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32( @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -392,10 +394,10 @@ declare @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -406,16 +408,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32( , - i32); + iXLen); -define @intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -424,7 +426,7 @@ define @intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32( @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -433,10 +435,10 @@ declare @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32( 
, , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -447,16 +449,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64( , - i32); + iXLen); -define @intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -465,7 +467,7 @@ define @intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64( @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -474,10 +476,10 @@ declare @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -488,16 +490,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64( , - i32); + iXLen); -define @intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -506,7 +508,7 @@ define @intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64( @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -515,10 +517,10 @@ declare @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -529,16 +531,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64( , - i32); + iXLen); -define @intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -547,7 +549,7 @@ define @intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64( @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -556,10 +558,10 @@ declare @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -570,16 +572,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64( , - i32); + iXLen); -define @intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64( %0, i32 %1) nounwind 
{ +define @intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -588,7 +590,7 @@ define @intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64( @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -597,10 +599,10 @@ declare @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -611,7 +613,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll deleted file mode 100644 index 8fb44d2d1ecf0d090133eef2e4769b349084e6b5..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll +++ /dev/null @@ -1,617 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16( - , - i64); - -define @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16( - , - i64); - -define @intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16( - , - i64); - -define @intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16( - %0, - i64 %1) - 
- ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16( - , - i64); - -define @intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16( - , - i64); - -define @intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16( - , - i64); - -define @intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32( - , - i64); - -define @intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v8 
-; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32( - , - i64); - -define @intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32( - , - i64); - -define @intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32( - , - i64); - -define @intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32( - , - i64); - -define @intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64( - , - i64); - -define @intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64( - , - i64); - -define @intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64( - , - i64); - -define @intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64( - , - i64); - -define @intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64( %0, i64 %1) nounwind { -; 
CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfcvt.xu.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll index e76b0db05446eca9febb769824b2da076112edaf..fb8a4797f0ae87fb71a0421112ec17974a244681 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16( , - i32); + iXLen); -define @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -14,7 +16,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16( @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -23,10 +25,10 @@ declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -37,16 +39,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16( , - i32); + iXLen); -define @intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -55,7 +57,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16( @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -64,10 +66,10 @@ declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, 
e16, mf2, ta, mu @@ -78,16 +80,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16( , - i32); + iXLen); -define @intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -96,7 +98,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16( @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -105,10 +107,10 @@ declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -119,16 +121,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16( , - i32); + iXLen); -define @intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -137,7 +139,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16( @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -146,10 +148,10 @@ declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -160,16 +162,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -178,7 +180,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16( @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -187,10 +189,10 @@ declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -201,16 +203,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16( , - i32); + iXLen); -define @intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -219,7 +221,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16( 
@llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -228,10 +230,10 @@ declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -242,16 +244,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -260,7 +262,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32( @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -269,10 +271,10 @@ declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -283,16 +285,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -301,7 +303,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32( @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -324,16 +326,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -342,7 +344,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32( @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -351,10 +353,10 @@ declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -365,16 +367,16 @@ entry: %0, 
%1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -383,7 +385,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32( @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -392,10 +394,10 @@ declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -406,16 +408,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32( , - i32); + iXLen); -define @intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -424,7 +426,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32( @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -433,10 +435,10 @@ declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -447,16 +449,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64( , - i32); + iXLen); -define @intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -465,7 +467,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64( @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -474,10 +476,10 @@ declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -488,16 +490,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64( , - i32); + iXLen); -define @intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -506,7 +508,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64( @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64( %0, - i32 %1) 
+ iXLen %1) ret %a } @@ -515,10 +517,10 @@ declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -529,16 +531,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64( , - i32); + iXLen); -define @intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -547,7 +549,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64( @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -556,10 +558,10 @@ declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -570,16 +572,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64( , - i32); + iXLen); -define @intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -588,7 +590,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64( @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -597,10 +599,10 @@ declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -611,7 +613,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll deleted file mode 100644 index 2d4a16e1bf4e88f5fe81fba806c5447d0d978c1f..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll +++ /dev/null @@ -1,1355 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfdiv.nxv1f16( - , - , - i64); - -define @intrinsic_vfdiv_vv_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv1f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv1f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16( %0, %1, 
%2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv2f16( - , - , - i64); - -define @intrinsic_vfdiv_vv_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv2f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv2f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv4f16( - , - , - i64); - -define @intrinsic_vfdiv_vv_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv4f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv4f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv8f16( - , - , - i64); - -define @intrinsic_vfdiv_vv_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv8f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv8f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv16f16( - , - , - i64); - -define @intrinsic_vfdiv_vv_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vv_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv16f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv16f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv32f16( - , - , - i64); - -define @intrinsic_vfdiv_vv_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vv_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv32f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv32f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv1f32( - , - , - i64); - -define @intrinsic_vfdiv_vv_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv1f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv1f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv2f32( - , - , - i64); - -define @intrinsic_vfdiv_vv_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv2f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv2f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv4f32( - , - , - i64); - -define @intrinsic_vfdiv_vv_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv4f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv4f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: 
vfdiv.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv8f32( - , - , - i64); - -define @intrinsic_vfdiv_vv_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv8f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv8f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv16f32( - , - , - i64); - -define @intrinsic_vfdiv_vv_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vv_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv16f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv16f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv1f64( - , - , - i64); - -define @intrinsic_vfdiv_vv_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv1f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv1f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv2f64( - , - , - i64); - -define @intrinsic_vfdiv_vv_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv2f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv2f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfdiv.mask.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv4f64( - , - , - i64); - -define @intrinsic_vfdiv_vv_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv4f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv4f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv8f64( - , - , - i64); - -define @intrinsic_vfdiv_vv_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv8f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv8f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv8f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv1f16.f16( - , - half, - i64); - -define @intrinsic_vfdiv_vf_nxv1f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv1f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv1f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv2f16.f16( - , - half, - i64); - -define @intrinsic_vfdiv_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv2f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv2f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfdiv.mask.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv4f16.f16( - , - half, - i64); - -define @intrinsic_vfdiv_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv4f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv4f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfdiv_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv8f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfdiv_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv16f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv32f16.f16( - , - half, - i64); - -define @intrinsic_vfdiv_vf_nxv32f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vf_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv32f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv32f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a 
= call @llvm.riscv.vfdiv.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv1f32.f32( - , - float, - i64); - -define @intrinsic_vfdiv_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfdiv_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfdiv_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv4f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv4f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv8f32.f32( - , - float, - i64); - -define @intrinsic_vfdiv_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv8f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv8f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v12, fa0, v0.t -; CHECK-NEXT: 
ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv16f32.f32( - , - float, - i64); - -define @intrinsic_vfdiv_vf_nxv16f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv16f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv16f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv1f64.f64( - , - double, - i64); - -define @intrinsic_vfdiv_vf_nxv1f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv1f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv1f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv2f64.f64( - , - double, - i64); - -define @intrinsic_vfdiv_vf_nxv2f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv2f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv2f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv2f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv4f64.f64( - , - double, - i64); - -define @intrinsic_vfdiv_vf_nxv4f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv4f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv4f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: 
vfdiv.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv4f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv8f64.f64( - , - double, - i64); - -define @intrinsic_vfdiv_vf_nxv8f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv8f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv8f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv8f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll similarity index 86% rename from llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfdiv.ll index 01bfb50ed9b498cf235c0a39170284d169ac0977..0145f2ad764ecadc93a5dbba6646c407edcad119 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfdiv.nxv1f16.nxv1f16( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -16,7 +18,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv1f16.nxv1f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -26,10 +28,10 @@ declare @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -41,7 +43,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -49,9 +51,9 @@ entry: declare @llvm.riscv.vfdiv.nxv2f16.nxv2f16( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -61,7 +63,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv2f16.nxv2f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -71,10 +73,10 @@ declare @llvm.riscv.vfdiv.mask.nxv2f16.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define 
@intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -86,7 +88,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -94,9 +96,9 @@ entry: declare @llvm.riscv.vfdiv.nxv4f16.nxv4f16( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -106,7 +108,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv4f16.nxv4f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -116,10 +118,10 @@ declare @llvm.riscv.vfdiv.mask.nxv4f16.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfdiv.nxv8f16.nxv8f16( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv8f16.nxv8f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfdiv.mask.nxv8f16.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfdiv.nxv16f16.nxv16f16( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv16f16.nxv16f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfdiv.mask.nxv16f16.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -229,9 +231,9 @@ entry: declare 
@llvm.riscv.vfdiv.nxv32f16.nxv32f16( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv32f16.nxv32f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -251,10 +253,10 @@ declare @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -267,7 +269,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -275,9 +277,9 @@ entry: declare @llvm.riscv.vfdiv.nxv1f32.nxv1f32( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -287,7 +289,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -297,10 +299,10 @@ declare @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -312,7 +314,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -320,9 +322,9 @@ entry: declare @llvm.riscv.vfdiv.nxv2f32.nxv2f32( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -332,7 +334,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv2f32.nxv2f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -342,10 +344,10 @@ declare @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -357,7 +359,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -365,9 +367,9 @@ entry: declare @llvm.riscv.vfdiv.nxv4f32.nxv4f32( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -377,7 +379,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv4f32.nxv4f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -387,10 
+389,10 @@ declare @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -402,7 +404,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -410,9 +412,9 @@ entry: declare @llvm.riscv.vfdiv.nxv8f32.nxv8f32( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -422,7 +424,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv8f32.nxv8f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -432,10 +434,10 @@ declare @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -447,7 +449,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -455,9 +457,9 @@ entry: declare @llvm.riscv.vfdiv.nxv16f32.nxv16f32( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -467,7 +469,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv16f32.nxv16f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -477,10 +479,10 @@ declare @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -493,7 +495,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -501,9 +503,9 @@ entry: declare @llvm.riscv.vfdiv.nxv1f64.nxv1f64( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -513,7 +515,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv1f64.nxv1f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -523,10 +525,10 @@ declare @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -538,7 +540,7 @@ entry: %1, %2, %3, - i32 %4, i32 
1) + iXLen %4, iXLen 1) ret %a } @@ -546,9 +548,9 @@ entry: declare @llvm.riscv.vfdiv.nxv2f64.nxv2f64( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -558,7 +560,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv2f64.nxv2f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -568,10 +570,10 @@ declare @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -583,7 +585,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -591,9 +593,9 @@ entry: declare @llvm.riscv.vfdiv.nxv4f64.nxv4f64( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -603,7 +605,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv4f64.nxv4f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -613,10 +615,10 @@ declare @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -628,7 +630,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -636,9 +638,9 @@ entry: declare @llvm.riscv.vfdiv.nxv8f64.nxv8f64( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -648,7 +650,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv8f64.nxv8f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -658,10 +660,10 @@ declare @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -674,7 +676,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -682,9 +684,9 @@ entry: declare @llvm.riscv.vfdiv.nxv1f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -694,7 +696,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv1f16.f16( %0, half %1, - 
i32 %2) + iXLen %2) ret %a } @@ -704,10 +706,10 @@ declare @llvm.riscv.vfdiv.mask.nxv1f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -719,7 +721,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -727,9 +729,9 @@ entry: declare @llvm.riscv.vfdiv.nxv2f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -739,7 +741,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv2f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -749,10 +751,10 @@ declare @llvm.riscv.vfdiv.mask.nxv2f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -764,7 +766,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -772,9 +774,9 @@ entry: declare @llvm.riscv.vfdiv.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -784,7 +786,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -794,10 +796,10 @@ declare @llvm.riscv.vfdiv.mask.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -809,7 +811,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -817,9 +819,9 @@ entry: declare @llvm.riscv.vfdiv.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -829,7 +831,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -839,10 +841,10 @@ declare @llvm.riscv.vfdiv.mask.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -854,7 
+856,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -862,9 +864,9 @@ entry: declare @llvm.riscv.vfdiv.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -874,7 +876,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -884,10 +886,10 @@ declare @llvm.riscv.vfdiv.mask.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -899,7 +901,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -907,9 +909,9 @@ entry: declare @llvm.riscv.vfdiv.nxv32f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -919,7 +921,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv32f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -929,10 +931,10 @@ declare @llvm.riscv.vfdiv.mask.nxv32f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -944,7 +946,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -952,9 +954,9 @@ entry: declare @llvm.riscv.vfdiv.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -964,7 +966,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -974,10 +976,10 @@ declare @llvm.riscv.vfdiv.mask.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -989,7 +991,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -997,9 +999,9 @@ entry: declare @llvm.riscv.vfdiv.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a0, e32, m1, ta, mu @@ -1009,7 +1011,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1019,10 +1021,10 @@ declare @llvm.riscv.vfdiv.mask.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1034,7 +1036,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1042,9 +1044,9 @@ entry: declare @llvm.riscv.vfdiv.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1054,7 +1056,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1064,10 +1066,10 @@ declare @llvm.riscv.vfdiv.mask.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1079,7 +1081,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1087,9 +1089,9 @@ entry: declare @llvm.riscv.vfdiv.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1099,7 +1101,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1109,10 +1111,10 @@ declare @llvm.riscv.vfdiv.mask.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1124,7 +1126,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1132,9 +1134,9 @@ entry: declare @llvm.riscv.vfdiv.nxv16f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1144,7 +1146,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv16f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1154,10 +1156,10 @@ declare @llvm.riscv.vfdiv.mask.nxv16f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, 
float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1169,7 +1171,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1177,9 +1179,9 @@ entry: declare @llvm.riscv.vfdiv.nxv1f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1189,7 +1191,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv1f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1199,10 +1201,10 @@ declare @llvm.riscv.vfdiv.mask.nxv1f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1214,7 +1216,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vfdiv.nxv2f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv2f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1244,10 +1246,10 @@ declare @llvm.riscv.vfdiv.mask.nxv2f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1259,7 +1261,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1267,9 +1269,9 @@ entry: declare @llvm.riscv.vfdiv.nxv4f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1279,7 +1281,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv4f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1289,10 +1291,10 @@ declare @llvm.riscv.vfdiv.mask.nxv4f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1304,7 +1306,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1312,9 +1314,9 @@ entry: declare @llvm.riscv.vfdiv.nxv8f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv8f64_nxv8f64_f64( 
%0, double %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1324,7 +1326,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv8f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1334,10 +1336,10 @@ declare @llvm.riscv.vfdiv.mask.nxv8f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1349,7 +1351,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll deleted file mode 100644 index 16d305bad846cf6e8dfcdd48f5fa9c28fb9f5588..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll +++ /dev/null @@ -1,1106 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s -declare @llvm.riscv.vfmacc.nxv1f16.nxv1f16( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv1f16.nxv1f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv2f16.nxv2f16( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv2f16.nxv2f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv4f16.nxv4f16( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu 
-; CHECK-NEXT: vfmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv4f16.nxv4f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv8f16.nxv8f16( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv8f16.nxv8f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv16f16.nxv16f16( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv16f16.nxv16f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv1f32.nxv1f32( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv2f32.nxv2f32( - , - , - , - i32); - -define 
@intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv4f32.nxv4f32( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv8f32.nxv8f32( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv1f64.nxv1f64( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: 
- %a = call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv2f64.nxv2f64( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv4f64.nxv4f64( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv1f16.f16( - , - half, - , - i32); - -define @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv1f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv1f16.f16( - , - half, - , - , - i32); - -define @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv1f16.f16( - %0, - half %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv2f16.f16( - , - half, - , - i32); - -define @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv2f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv2f16.f16( - , - half, - , - , - i32); - -define @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv2f16.f16( - %0, - half %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv4f16.f16( - , - half, - , - i32); - -define @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv4f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv4f16.f16( - , - half, - , - , - i32); - -define @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv4f16.f16( - %0, - half %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv8f16.f16( - , - half, - , - i32); - -define @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv8f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv8f16.f16( - , - half, - , - , - i32); - -define @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv8f16.f16( - %0, - half %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv16f16.f16( - , - half, - , - i32); - -define @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv16f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv16f16.f16( - , - half, - , - , - i32); - -define @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv16f16.f16( - %0, - half %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv1f32.f32( - , - float, - , - i32); - -define @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv1f32.f32( - %0, - float %1, - %2, - i32 %3) - - ret %a -} - -declare 
@llvm.riscv.vfmacc.mask.nxv1f32.f32( - , - float, - , - , - i32); - -define @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv1f32.f32( - %0, - float %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv2f32.f32( - , - float, - , - i32); - -define @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv2f32.f32( - %0, - float %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv2f32.f32( - , - float, - , - , - i32); - -define @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv2f32.f32( - %0, - float %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv4f32.f32( - , - float, - , - i32); - -define @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv4f32.f32( - %0, - float %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv4f32.f32( - , - float, - , - , - i32); - -define @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv4f32.f32( - %0, - float %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv8f32.f32( - , - float, - , - i32); - -define @intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv8f32.f32( - %0, - float %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv8f32.f32( - , - float, - , - , - i32); - -define @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv8f32.f32( - %0, - float %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv1f64.f64( - , - double, - , - i32); - -define @intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    <vscale x 1 x double> %2,
-    i32 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x double>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    i32 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    i32 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc.ll
similarity index 89%
rename from llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfmacc.ll
index c5809888ff17b362b93bb55ee975e469c7e56445..5115a7548e2ceb81bf37d6687a09168cdb767031 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define 
@intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( , , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfmacc.nxv2f16.nxv2f16( , , , - i64); + iXLen); -define @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16( , , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfmacc.nxv4f16.nxv4f16( , , , - i64); + iXLen); -define @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16( , , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfmacc.nxv8f16.nxv8f16( , , , - i64); + iXLen); -define @intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16( , , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfmacc.nxv16f16.nxv16f16( , , , - i64); + iXLen); -define @intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16( , , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfmacc.nxv1f32.nxv1f32( , , , - i64); + iXLen); -define @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32( , , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfmacc.nxv2f32.nxv2f32( , , , - i64); + iXLen); -define @intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32( , , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfmacc.nxv4f32.nxv4f32( , , , - i64); + iXLen); -define @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret 
%a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32( , , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfmacc.nxv8f32.nxv8f32( , , , - i64); + iXLen); -define @intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32( , , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfmacc.nxv1f64.nxv1f64( , , , - i64); + iXLen); -define @intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64( , , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfmacc.nxv2f64.nxv2f64( , , , - i64); + iXLen); -define @intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64( , , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfmacc.nxv4f64.nxv4f64( , , , - i64); + iXLen); -define @intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { +define 
@intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64( , , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfmacc.nxv1f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfmacc.mask.nxv1f16.f16( half, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfmacc.nxv2f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -616,7 +618,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfmacc.mask.nxv2f16.f16( half, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfmacc.nxv4f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -662,7 +664,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfmacc.mask.nxv4f16.f16( half, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -686,7 +688,7 @@ entry: half %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfmacc.nxv8f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -708,7 +710,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfmacc.mask.nxv8f16.f16( half, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -732,7 +734,7 @@ entry: half %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfmacc.nxv16f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -754,7 +756,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfmacc.mask.nxv16f16.f16( half, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -778,7 +780,7 @@ entry: half %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vfmacc.nxv1f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfmacc.mask.nxv1f32.f32( float, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -833,9 +835,9 @@ declare @llvm.riscv.vfmacc.nxv2f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -846,7 +848,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -856,9 +858,9 @@ declare 
@llvm.riscv.vfmacc.mask.nxv2f32.f32( float, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -870,7 +872,7 @@ entry: float %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -879,9 +881,9 @@ declare @llvm.riscv.vfmacc.nxv4f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -892,7 +894,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -902,9 +904,9 @@ declare @llvm.riscv.vfmacc.mask.nxv4f32.f32( float, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -916,7 +918,7 @@ entry: float %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -925,9 +927,9 @@ declare @llvm.riscv.vfmacc.nxv8f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -938,7 +940,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vfmacc.mask.nxv8f32.f32( float, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -962,7 +964,7 @@ entry: float %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -971,9 +973,9 @@ declare @llvm.riscv.vfmacc.nxv1f64.f64( , double, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -984,7 +986,7 @@ entry: %0, double %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -994,9 +996,9 @@ declare @llvm.riscv.vfmacc.mask.nxv1f64.f64( double, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -1008,7 +1010,7 @@ entry: double %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1017,9 +1019,9 @@ declare @llvm.riscv.vfmacc.nxv2f64.f64( , double, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, 
%2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1030,7 +1032,7 @@ entry: %0, double %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -1040,9 +1042,9 @@ declare @llvm.riscv.vfmacc.mask.nxv2f64.f64( double, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1054,7 +1056,7 @@ entry: double %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1063,9 +1065,9 @@ declare @llvm.riscv.vfmacc.nxv4f64.f64( , double, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1076,7 +1078,7 @@ entry: %0, double %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -1086,9 +1088,9 @@ declare @llvm.riscv.vfmacc.mask.nxv4f64.f64( double, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1100,7 +1102,7 @@ entry: double %1, %2, %3, - i64 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll deleted file mode 100644 index afd41bd8a2122bcb6724c2c21acf8f275d493fa8..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll +++ /dev/null @@ -1,1106 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfmadd.nxv1f16.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv1f16.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv2f16.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: 
vfmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv2f16.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv4f16.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv4f16.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv8f16.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv8f16.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv16f16.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv16f16.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv1f32.nxv1f32( - , - , - , - i64); - -define 
@intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv2f32.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv4f32.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv8f32.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret 
-entry: - %a = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv1f64.nxv1f64( - , - , - , - i64); - -define @intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv2f64.nxv2f64( - , - , - , - i64); - -define @intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv4f64.nxv4f64( - , - , - , - i64); - -define @intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv1f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv1f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv1f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv1f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv2f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv2f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv2f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv2f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv4f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv4f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv4f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv4f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv8f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv8f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv8f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv8f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv16f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv16f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare 
@llvm.riscv.vfmadd.mask.nxv16f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv16f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv1f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv1f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv1f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv1f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv2f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv2f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv2f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv2f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv4f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv4f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv4f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv4f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv8f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv8f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv8f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv8f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv1f64.f64( - , - double, - , - i64); - -define @intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv1f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv1f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv1f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv2f64.f64( - , - double, - , - i64); - -define @intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv2f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv2f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv2f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv4f64.f64( - , - double, - , - i64); - -define @intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv4f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv4f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv4f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll 
b/llvm/test/CodeGen/RISCV/rvv/vfmadd.ll similarity index 89% rename from llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfmadd.ll index cfb32cfab4cdc10c419953a057d93bcb31997975..9313e440e500f96d53830a50edf3ed16de7b763b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfmadd.nxv1f16.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfmadd.nxv2f16.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfmadd.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfmadd.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfmadd.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfmadd.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfmadd.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare 
@llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfmadd.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfmadd.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfmadd.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfmadd.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define 
@intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfmadd.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfmadd.nxv1f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfmadd.mask.nxv1f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfmadd.nxv2f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -616,7 +618,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfmadd.mask.nxv2f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfmadd.nxv4f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -662,7 +664,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfmadd.mask.nxv4f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -686,7 +688,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfmadd.nxv8f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -708,7 +710,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfmadd.mask.nxv8f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -732,7 +734,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfmadd.nxv16f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -754,7 +756,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfmadd.mask.nxv16f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -778,7 +780,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vfmadd.nxv1f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare 
@llvm.riscv.vfmadd.mask.nxv1f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -833,9 +835,9 @@ declare @llvm.riscv.vfmadd.nxv2f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -846,7 +848,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -856,9 +858,9 @@ declare @llvm.riscv.vfmadd.mask.nxv2f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -870,7 +872,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -879,9 +881,9 @@ declare @llvm.riscv.vfmadd.nxv4f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -892,7 +894,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -902,9 +904,9 @@ declare @llvm.riscv.vfmadd.mask.nxv4f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -916,7 +918,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -925,9 +927,9 @@ declare @llvm.riscv.vfmadd.nxv8f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -938,7 +940,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vfmadd.mask.nxv8f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -962,7 +964,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -971,9 +973,9 @@ declare @llvm.riscv.vfmadd.nxv1f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i32 
%3) nounwind { +define @intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -984,7 +986,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -994,9 +996,9 @@ declare @llvm.riscv.vfmadd.mask.nxv1f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -1008,7 +1010,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1017,9 +1019,9 @@ declare @llvm.riscv.vfmadd.nxv2f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1030,7 +1032,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1040,9 +1042,9 @@ declare @llvm.riscv.vfmadd.mask.nxv2f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1054,7 +1056,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1063,9 +1065,9 @@ declare @llvm.riscv.vfmadd.nxv4f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1076,7 +1078,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1086,9 +1088,9 @@ declare @llvm.riscv.vfmadd.mask.nxv4f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1100,7 +1102,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll deleted file mode 100644 index 98b4cf71da14dcf689cdcfb3b80ee1cd977eb70b..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll +++ /dev/null @@ -1,1355 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s -declare @llvm.riscv.vfmax.nxv1f16.nxv1f16( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, 
mf4, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv1f16.nxv1f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv2f16.nxv2f16( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv2f16.nxv2f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv4f16.nxv4f16( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv4f16.nxv4f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv8f16.nxv8f16( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv8f16.nxv8f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmax.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv16f16.nxv16f16( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i32 
%2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv16f16.nxv16f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmax.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv32f16.nxv32f16( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv32f16.nxv32f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv1f32.nxv1f32( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv1f32.nxv1f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv2f32.nxv2f32( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv2f32.nxv2f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfmax.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv4f32.nxv4f32( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv4f32.nxv4f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmax.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv8f32.nxv8f32( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv8f32.nxv8f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmax.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv16f32.nxv16f32( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv16f32.nxv16f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv1f64.nxv1f64( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv1f64.nxv1f64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv2f64.nxv2f64( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv2f64.nxv2f64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmax.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv4f64.nxv4f64( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv4f64.nxv4f64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfmax.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv8f64.nxv8f64( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv8f64.nxv8f64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv1f16.f16( - , - half, - i32); - -define @intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv1f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare 
@llvm.riscv.vfmax.mask.nxv1f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv2f16.f16( - , - half, - i32); - -define @intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv2f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv2f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv4f16.f16( - , - half, - i32); - -define @intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv4f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv4f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv8f16.f16( - , - half, - i32); - -define @intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv8f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv8f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv16f16.f16( - , - half, - i32); - -define @intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, 
fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv16f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv16f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv32f16.f16( - , - half, - i32); - -define @intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv32f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv32f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfmax.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv1f32.f32( - , - float, - i32); - -define @intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv1f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv1f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv2f32.f32( - , - float, - i32); - -define @intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv2f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv2f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv4f32.f32( - , - float, - i32); - -define @intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { -; 
CHECK-LABEL: intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv4f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv4f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv8f32.f32( - , - float, - i32); - -define @intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv8f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv8f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv16f32.f32( - , - float, - i32); - -define @intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv16f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv16f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfmax.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv1f64.f64( - , - double, - i32); - -define @intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv1f64.f64( - %0, - double %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv1f64.f64( - , - , - double, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i32 %4, i32 1) - 
-  ret %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    i32 %2)
-
-  ret %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    double %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    i32 %2)
-
-  ret %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    double %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i32 %2)
-
-  ret %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfmax.ll
index 4fc7319fb2b295648ec1cf27080a6dd60d79f6d1..446981928b6cf6436b557c3eb5ba04bafeee2513 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret %a
 }
@@ -116,10 +118,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -131,7 +133,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret %a
 }
@@ -139,9 +141,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@
-151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv8f16.nxv8f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfmax.nxv16f16.nxv16f16( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv16f16.nxv16f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -229,9 +231,9 @@ entry: declare @llvm.riscv.vfmax.nxv32f16.nxv32f16( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv32f16.nxv32f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -251,10 +253,10 @@ declare @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -267,7 +269,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -275,9 +277,9 @@ entry: declare @llvm.riscv.vfmax.nxv1f32.nxv1f32( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -287,7 +289,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv1f32.nxv1f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -297,10 +299,10 @@ declare @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -312,7 +314,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -320,9 +322,9 @@ entry: declare @llvm.riscv.vfmax.nxv2f32.nxv2f32( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -332,7 +334,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv2f32.nxv2f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -342,10 +344,10 @@ declare @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -357,7 +359,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -365,9 +367,9 @@ entry: declare @llvm.riscv.vfmax.nxv4f32.nxv4f32( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -377,7 +379,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv4f32.nxv4f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -387,10 +389,10 @@ declare @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -402,7 +404,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -410,9 +412,9 @@ entry: declare @llvm.riscv.vfmax.nxv8f32.nxv8f32( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -422,7 +424,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv8f32.nxv8f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -432,10 +434,10 @@ declare @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -447,7 +449,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -455,9 +457,9 @@ entry: declare @llvm.riscv.vfmax.nxv16f32.nxv16f32( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -467,7 +469,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv16f32.nxv16f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -477,10 +479,10 @@ declare @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -493,7 +495,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -501,9 +503,9 @@ entry: declare @llvm.riscv.vfmax.nxv1f64.nxv1f64( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -513,7 +515,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv1f64.nxv1f64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -523,10 +525,10 @@ declare @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -538,7 +540,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -546,9 +548,9 @@ entry: declare @llvm.riscv.vfmax.nxv2f64.nxv2f64( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -558,7 +560,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv2f64.nxv2f64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -568,10 +570,10 @@ declare @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -583,7 +585,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -591,9 +593,9 @@ entry: declare @llvm.riscv.vfmax.nxv4f64.nxv4f64( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -603,7 +605,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv4f64.nxv4f64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -613,10 +615,10 @@ declare @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +define 
@intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -628,7 +630,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -636,9 +638,9 @@ entry: declare @llvm.riscv.vfmax.nxv8f64.nxv8f64( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -648,7 +650,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv8f64.nxv8f64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -658,10 +660,10 @@ declare @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -674,7 +676,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -682,9 +684,9 @@ entry: declare @llvm.riscv.vfmax.nxv1f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -694,7 +696,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv1f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -704,10 +706,10 @@ declare @llvm.riscv.vfmax.mask.nxv1f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -719,7 +721,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -727,9 +729,9 @@ entry: declare @llvm.riscv.vfmax.nxv2f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -739,7 +741,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv2f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -749,10 +751,10 @@ declare @llvm.riscv.vfmax.mask.nxv2f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -764,7 +766,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -772,9 +774,9 @@ entry: declare @llvm.riscv.vfmax.nxv4f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { +define 
@intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -784,7 +786,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv4f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -794,10 +796,10 @@ declare @llvm.riscv.vfmax.mask.nxv4f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -809,7 +811,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -817,9 +819,9 @@ entry: declare @llvm.riscv.vfmax.nxv8f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -829,7 +831,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv8f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -839,10 +841,10 @@ declare @llvm.riscv.vfmax.mask.nxv8f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -854,7 +856,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -862,9 +864,9 @@ entry: declare @llvm.riscv.vfmax.nxv16f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -874,7 +876,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv16f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -884,10 +886,10 @@ declare @llvm.riscv.vfmax.mask.nxv16f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -899,7 +901,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -907,9 +909,9 @@ entry: declare @llvm.riscv.vfmax.nxv32f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -919,7 +921,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv32f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -929,10 +931,10 @@ declare @llvm.riscv.vfmax.mask.nxv32f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define 
@intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -944,7 +946,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -952,9 +954,9 @@ entry: declare @llvm.riscv.vfmax.nxv1f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -964,7 +966,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv1f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -974,10 +976,10 @@ declare @llvm.riscv.vfmax.mask.nxv1f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -989,7 +991,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -997,9 +999,9 @@ entry: declare @llvm.riscv.vfmax.nxv2f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1009,7 +1011,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv2f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -1019,10 +1021,10 @@ declare @llvm.riscv.vfmax.mask.nxv2f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1034,7 +1036,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1042,9 +1044,9 @@ entry: declare @llvm.riscv.vfmax.nxv4f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1054,7 +1056,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv4f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -1064,10 +1066,10 @@ declare @llvm.riscv.vfmax.mask.nxv4f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1079,7 +1081,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1087,9 +1089,9 @@ entry: 
declare @llvm.riscv.vfmax.nxv8f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1099,7 +1101,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv8f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -1109,10 +1111,10 @@ declare @llvm.riscv.vfmax.mask.nxv8f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1124,7 +1126,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1132,9 +1134,9 @@ entry: declare @llvm.riscv.vfmax.nxv16f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1144,7 +1146,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv16f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -1154,10 +1156,10 @@ declare @llvm.riscv.vfmax.mask.nxv16f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1169,7 +1171,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1177,9 +1179,9 @@ entry: declare @llvm.riscv.vfmax.nxv1f64.f64( , double, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1189,7 +1191,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv1f64.f64( %0, double %1, - i64 %2) + iXLen %2) ret %a } @@ -1199,10 +1201,10 @@ declare @llvm.riscv.vfmax.mask.nxv1f64.f64( , double, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1214,7 +1216,7 @@ entry: %1, double %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vfmax.nxv2f64.f64( , double, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1234,7 +1236,7 @@ entry: %a = call 
@llvm.riscv.vfmax.nxv2f64.f64( %0, double %1, - i64 %2) + iXLen %2) ret %a } @@ -1244,10 +1246,10 @@ declare @llvm.riscv.vfmax.mask.nxv2f64.f64( , double, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1259,7 +1261,7 @@ entry: %1, double %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1267,9 +1269,9 @@ entry: declare @llvm.riscv.vfmax.nxv4f64.f64( , double, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1279,7 +1281,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv4f64.f64( %0, double %1, - i64 %2) + iXLen %2) ret %a } @@ -1289,10 +1291,10 @@ declare @llvm.riscv.vfmax.mask.nxv4f64.f64( , double, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1304,7 +1306,7 @@ entry: %1, double %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1312,9 +1314,9 @@ entry: declare @llvm.riscv.vfmax.nxv8f64.f64( , double, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1324,7 +1326,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv8f64.f64( %0, double %1, - i64 %2) + iXLen %2) ret %a } @@ -1334,10 +1336,10 @@ declare @llvm.riscv.vfmax.mask.nxv8f64.f64( , double, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1349,7 +1351,7 @@ entry: %1, double %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll deleted file mode 100644 index 3dc1240d0f7b63058c50ab3ea0cdfef1134b6000..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll +++ /dev/null @@ -1,902 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s -declare @llvm.riscv.vfmerge.nxv1f16.nxv1f16( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv1f16.nxv1f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv1f16.f16( - , - half, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv1f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv2f16.nxv2f16( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv2f16.nxv2f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv2f16.f16( - , - half, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv2f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv4f16.nxv4f16( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv4f16.nxv4f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv4f16.f16( - , - half, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv4f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv8f16.nxv8f16( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv8f16.nxv8f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv8f16.f16( - , - half, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv8f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv16f16.nxv16f16( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv16f16.nxv16f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv16f16.f16( - , - half, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv16f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv32f16.nxv32f16( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv32f16.nxv32f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv32f16.f16( - , - half, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv32f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv1f32.nxv1f32( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv1f32.nxv1f32( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv1f32.f32( - , - float, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv1f32.f32( - %0, - float %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv2f32.nxv2f32( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv2f32.nxv2f32( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv2f32.f32( - , - float, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv2f32.f32( - %0, - float %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv4f32.nxv4f32( - , - , - , - i32); - -define 
@intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv4f32.nxv4f32( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv4f32.f32( - , - float, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv4f32.f32( - %0, - float %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv8f32.nxv8f32( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv8f32.nxv8f32( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv8f32.f32( - , - float, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv8f32.f32( - %0, - float %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv16f32.nxv16f32( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv16f32.nxv16f32( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv16f32.f32( - , - float, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv16f32.f32( - %0, - float %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv1f64.nxv1f64( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv1f64.nxv1f64( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv1f64.f64( - , - double, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64( %0, double %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv1f64.f64( 
- %0, - double %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv2f64.nxv2f64( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv2f64.nxv2f64( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv2f64.f64( - , - double, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64( %0, double %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv2f64.f64( - %0, - double %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv4f64.nxv4f64( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv4f64.nxv4f64( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv4f64.f64( - , - double, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64( %0, double %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv4f64.f64( - %0, - double %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv8f64.nxv8f64( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f64_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv8f64.nxv8f64( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv8f64.f64( - , - double, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64( %0, double %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv8f64.f64( - %0, - double %1, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv1f16.f16( - %0, - half zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv2f16.f16( - %0, - half 
zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv4f16.f16( - %0, - half zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv8f16.f16( - %0, - half zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv16f16.f16( - %0, - half zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv32f16.f16( - %0, - half zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv1f32.f32( - %0, - float zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv2f32.f32( - %0, - float zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv4f32.f32( - %0, - float zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv8f32.f32( - %0, - float zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv16f32.f32( - %0, - float zeroinitializer, - %1, - i32 %2) 
-
-  ret <vscale x 16 x float> %a
-}
-
-define <vscale x 1 x double> @intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double zeroinitializer,
-    <vscale x 1 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-define <vscale x 2 x double> @intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double zeroinitializer,
-    <vscale x 2 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-define <vscale x 4 x double> @intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double zeroinitializer,
-    <vscale x 4 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-define <vscale x 8 x double> @intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double zeroinitializer,
-    <vscale x 8 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge.ll
similarity index 88%
rename from llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfmerge.ll
index b23d908c7eddafe9955212ba368441a79e87567e..eb3efd1fa03736289c615ee6533230df5373b2ea 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmerge.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -27,9 +29,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -40,7 +42,7 @@ entry:
     <vscale x 1 x half> %0,
     half %1,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2,
i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -62,7 +64,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -71,9 +73,9 @@ declare @llvm.riscv.vfmerge.nxv2f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -84,7 +86,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -93,9 +95,9 @@ declare @llvm.riscv.vfmerge.nxv4f16.nxv4f16( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -106,7 +108,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -115,9 +117,9 @@ declare @llvm.riscv.vfmerge.nxv4f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -128,7 +130,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -137,9 +139,9 @@ declare @llvm.riscv.vfmerge.nxv8f16.nxv8f16( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -150,7 +152,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -159,9 +161,9 @@ declare @llvm.riscv.vfmerge.nxv8f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -172,7 +174,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -181,9 +183,9 @@ declare @llvm.riscv.vfmerge.nxv16f16.nxv16f16( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -194,7 +196,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -203,9 +205,9 @@ declare @llvm.riscv.vfmerge.nxv16f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, 
e16, m4, ta, mu @@ -216,7 +218,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -225,9 +227,9 @@ declare @llvm.riscv.vfmerge.nxv32f16.nxv32f16( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -238,7 +240,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -247,9 +249,9 @@ declare @llvm.riscv.vfmerge.nxv32f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -260,7 +262,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -269,9 +271,9 @@ declare @llvm.riscv.vfmerge.nxv1f32.nxv1f32( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -282,7 +284,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -291,9 +293,9 @@ declare @llvm.riscv.vfmerge.nxv1f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -304,7 +306,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -313,9 +315,9 @@ declare @llvm.riscv.vfmerge.nxv2f32.nxv2f32( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -326,7 +328,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -335,9 +337,9 @@ declare @llvm.riscv.vfmerge.nxv2f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -348,7 +350,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -357,9 +359,9 @@ declare @llvm.riscv.vfmerge.nxv4f32.nxv4f32( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -370,7 +372,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -379,9 +381,9 @@ declare @llvm.riscv.vfmerge.nxv4f32.f32( , float, , - i64); + iXLen); -define 
@intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -392,7 +394,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -401,9 +403,9 @@ declare @llvm.riscv.vfmerge.nxv8f32.nxv8f32( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -414,7 +416,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -423,9 +425,9 @@ declare @llvm.riscv.vfmerge.nxv8f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -436,7 +438,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -445,9 +447,9 @@ declare @llvm.riscv.vfmerge.nxv16f32.nxv16f32( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -458,7 +460,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -467,9 +469,9 @@ declare @llvm.riscv.vfmerge.nxv16f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -480,7 +482,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -489,9 +491,9 @@ declare @llvm.riscv.vfmerge.nxv1f64.nxv1f64( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -502,7 +504,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfmerge.nxv1f64.f64( , double, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64( %0, double %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -524,7 +526,7 @@ entry: %0, double %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -533,9 +535,9 @@ declare @llvm.riscv.vfmerge.nxv2f64.nxv2f64( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfmerge_vvm_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -546,7 +548,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -555,9 +557,9 @@ declare @llvm.riscv.vfmerge.nxv2f64.f64( , double, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64( %0, double %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -568,7 +570,7 @@ entry: %0, double %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -577,9 +579,9 @@ declare @llvm.riscv.vfmerge.nxv4f64.nxv4f64( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -590,7 +592,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -599,9 +601,9 @@ declare @llvm.riscv.vfmerge.nxv4f64.f64( , double, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64( %0, double %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -612,7 +614,7 @@ entry: %0, double %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -621,9 +623,9 @@ declare @llvm.riscv.vfmerge.nxv8f64.nxv8f64( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -634,7 +636,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -643,9 +645,9 @@ declare @llvm.riscv.vfmerge.nxv8f64.f64( , double, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64( %0, double %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -656,12 +658,12 @@ entry: %0, double %1, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -672,12 +674,12 @@ entry: %0, half zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -688,12 +690,12 @@ entry: %0, half zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -704,12 +706,12 @@ entry: %0, half zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -720,12 +722,12 @@ entry: %0, half zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -736,12 +738,12 @@ entry: %0, half zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -752,12 +754,12 @@ entry: %0, half zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -768,12 +770,12 @@ entry: %0, float zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -784,12 +786,12 @@ entry: %0, float zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -800,12 +802,12 @@ entry: %0, float zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -816,12 +818,12 @@ entry: %0, float zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -832,12 +834,12 @@ entry: %0, float zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a0, e64, m1, ta, mu
@@ -848,12 +850,12 @@ entry:
     <vscale x 1 x double> %0,
     double zeroinitializer,
     <vscale x 1 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
 
-define <vscale x 2 x double> @intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -864,12 +866,12 @@ entry:
     <vscale x 2 x double> %0,
     double zeroinitializer,
     <vscale x 2 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
 
-define <vscale x 4 x double> @intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -880,12 +882,12 @@ entry:
     <vscale x 4 x double> %0,
     double zeroinitializer,
     <vscale x 4 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
 
-define <vscale x 8 x double> @intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -896,7 +898,7 @@ entry:
     <vscale x 8 x double> %0,
     double zeroinitializer,
     <vscale x 8 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
deleted file mode 100644
index 0861a787440e47718e2857525a573f34a8105d70..0000000000000000000000000000000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
+++ /dev/null
@@ -1,1355 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i32 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i32 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half>
@llvm.riscv.vfmin.mask.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv4f16.nxv4f16( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv4f16.nxv4f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv8f16.nxv8f16( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv8f16.nxv8f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmin.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv16f16.nxv16f16( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv16f16.nxv16f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmin.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv32f16.nxv32f16( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv32f16.nxv32f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv1f32.nxv1f32( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv1f32.nxv1f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv2f32.nxv2f32( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv2f32.nxv2f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv4f32.nxv4f32( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv4f32.nxv4f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmin.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv8f32.nxv8f32( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv8f32.nxv8f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare 
@llvm.riscv.vfmin.mask.nxv8f32.nxv8f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmin.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv16f32.nxv16f32( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv16f32.nxv16f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv1f64.nxv1f64( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv1f64.nxv1f64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv2f64.nxv2f64( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv2f64.nxv2f64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmin.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv4f64.nxv4f64( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv4f64.nxv4f64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfmin.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv8f64.nxv8f64( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv8f64.nxv8f64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv1f16.f16( - , - half, - i32); - -define @intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv1f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv1f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv2f16.f16( - , - half, - i32); - -define @intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv2f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv2f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv4f16.f16( - , - half, - i32); - -define 
@intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv4f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv4f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv8f16.f16( - , - half, - i32); - -define @intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv8f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv8f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv16f16.f16( - , - half, - i32); - -define @intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv16f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv16f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv32f16.f16( - , - half, - i32); - -define @intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv32f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv32f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfmin.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfmin.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv1f32.f32( - , - float, - i32); - -define @intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv1f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv1f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv2f32.f32( - , - float, - i32); - -define @intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv2f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv2f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv4f32.f32( - , - float, - i32); - -define @intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv4f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv4f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv8f32.f32( - , - float, - i32); - -define @intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv8f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv8f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv16f32.f32( - , - float, - i32); - -define @intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv16f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv16f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfmin.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv1f64.f64( - , - double, - i32); - -define @intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv1f64.f64( - %0, - double %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv1f64.f64( - , - , - double, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv2f64.f64( - , - double, - i32); - -define @intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv2f64.f64( - %0, - double %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv2f64.f64( - , - , - double, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv2f64.f64( - %0, - %1, - double %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv4f64.f64( - , - double, - i32); - -define @intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv4f64.f64( - %0, - double %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv4f64.f64( - , - , - double, - , - i32, - 
i32); - -define @intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv4f64.f64( - %0, - %1, - double %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv8f64.f64( - , - double, - i32); - -define @intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv8f64.f64( - %0, - double %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv8f64.f64( - , - , - double, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfmin.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv8f64.f64( - %0, - %1, - double %2, - %3, - i32 %4, i32 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin.ll similarity index 86% rename from llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll rename to llvm/test/CodeGen/RISCV/rvv/vfmin.ll index e647fe51ffb1716e1022ea2f20af4a093bf4d7d0..e151e9fb695de19cbd2a35dc1e6472f0f86acd0e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfmin.nxv1f16.nxv1f16( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -16,7 +18,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv1f16.nxv1f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -26,10 +28,10 @@ declare @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -41,7 +43,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -49,9 +51,9 @@ entry: declare @llvm.riscv.vfmin.nxv2f16.nxv2f16( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -61,7 +63,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv2f16.nxv2f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -71,10 +73,10 @@ declare @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -86,7 +88,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -94,9 +96,9 @@ entry: declare @llvm.riscv.vfmin.nxv4f16.nxv4f16( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -106,7 +108,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv4f16.nxv4f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -116,10 +118,10 @@ declare @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfmin.nxv8f16.nxv8f16( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv8f16.nxv8f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfmin.nxv16f16.nxv16f16( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv16f16.nxv16f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { 
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -229,9 +231,9 @@ entry: declare @llvm.riscv.vfmin.nxv32f16.nxv32f16( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv32f16.nxv32f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -251,10 +253,10 @@ declare @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -267,7 +269,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -275,9 +277,9 @@ entry: declare @llvm.riscv.vfmin.nxv1f32.nxv1f32( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -287,7 +289,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv1f32.nxv1f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -297,10 +299,10 @@ declare @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -312,7 +314,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -320,9 +322,9 @@ entry: declare @llvm.riscv.vfmin.nxv2f32.nxv2f32( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -332,7 +334,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv2f32.nxv2f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -342,10 +344,10 @@ declare @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -357,7 +359,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -365,9 +367,9 @@ entry: declare @llvm.riscv.vfmin.nxv4f32.nxv4f32( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -377,7 +379,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv4f32.nxv4f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -387,10 +389,10 @@ declare @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -402,7 +404,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -410,9 +412,9 @@ entry: declare @llvm.riscv.vfmin.nxv8f32.nxv8f32( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -422,7 +424,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv8f32.nxv8f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -432,10 +434,10 @@ declare @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -447,7 +449,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -455,9 +457,9 @@ entry: declare @llvm.riscv.vfmin.nxv16f32.nxv16f32( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -467,7 +469,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv16f32.nxv16f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -477,10 +479,10 @@ declare @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -493,7 +495,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -501,9 +503,9 @@ entry: declare @llvm.riscv.vfmin.nxv1f64.nxv1f64( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -513,7 +515,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv1f64.nxv1f64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -523,10 +525,10 @@ declare @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +define 
@intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -538,7 +540,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -546,9 +548,9 @@ entry: declare @llvm.riscv.vfmin.nxv2f64.nxv2f64( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -558,7 +560,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv2f64.nxv2f64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -568,10 +570,10 @@ declare @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -583,7 +585,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -591,9 +593,9 @@ entry: declare @llvm.riscv.vfmin.nxv4f64.nxv4f64( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -603,7 +605,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv4f64.nxv4f64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -613,10 +615,10 @@ declare @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -628,7 +630,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -636,9 +638,9 @@ entry: declare @llvm.riscv.vfmin.nxv8f64.nxv8f64( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -648,7 +650,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv8f64.nxv8f64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -658,10 +660,10 @@ declare @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -674,7 +676,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -682,9 +684,9 @@ entry: declare @llvm.riscv.vfmin.nxv1f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { +define 
@intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -694,7 +696,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv1f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -704,10 +706,10 @@ declare @llvm.riscv.vfmin.mask.nxv1f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -719,7 +721,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -727,9 +729,9 @@ entry: declare @llvm.riscv.vfmin.nxv2f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -739,7 +741,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv2f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -749,10 +751,10 @@ declare @llvm.riscv.vfmin.mask.nxv2f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -764,7 +766,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -772,9 +774,9 @@ entry: declare @llvm.riscv.vfmin.nxv4f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -784,7 +786,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv4f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -794,10 +796,10 @@ declare @llvm.riscv.vfmin.mask.nxv4f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -809,7 +811,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -817,9 +819,9 @@ entry: declare @llvm.riscv.vfmin.nxv8f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -829,7 +831,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv8f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -839,10 +841,10 @@ declare @llvm.riscv.vfmin.mask.nxv8f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define 
@intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -854,7 +856,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -862,9 +864,9 @@ entry: declare @llvm.riscv.vfmin.nxv16f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -874,7 +876,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv16f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -884,10 +886,10 @@ declare @llvm.riscv.vfmin.mask.nxv16f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -899,7 +901,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -907,9 +909,9 @@ entry: declare @llvm.riscv.vfmin.nxv32f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -919,7 +921,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv32f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -929,10 +931,10 @@ declare @llvm.riscv.vfmin.mask.nxv32f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -944,7 +946,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -952,9 +954,9 @@ entry: declare @llvm.riscv.vfmin.nxv1f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -964,7 +966,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv1f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -974,10 +976,10 @@ declare @llvm.riscv.vfmin.mask.nxv1f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -989,7 +991,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -997,9 +999,9 @@ entry: declare 
@llvm.riscv.vfmin.nxv2f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1009,7 +1011,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv2f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -1019,10 +1021,10 @@ declare @llvm.riscv.vfmin.mask.nxv2f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1034,7 +1036,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1042,9 +1044,9 @@ entry: declare @llvm.riscv.vfmin.nxv4f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1054,7 +1056,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv4f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -1064,10 +1066,10 @@ declare @llvm.riscv.vfmin.mask.nxv4f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1079,7 +1081,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1087,9 +1089,9 @@ entry: declare @llvm.riscv.vfmin.nxv8f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1099,7 +1101,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv8f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -1109,10 +1111,10 @@ declare @llvm.riscv.vfmin.mask.nxv8f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1124,7 +1126,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1132,9 +1134,9 @@ entry: declare @llvm.riscv.vfmin.nxv16f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1144,7 +1146,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv16f32.f32( 
%0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -1154,10 +1156,10 @@ declare @llvm.riscv.vfmin.mask.nxv16f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1169,7 +1171,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1177,9 +1179,9 @@ entry: declare @llvm.riscv.vfmin.nxv1f64.f64( , double, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1189,7 +1191,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv1f64.f64( %0, double %1, - i64 %2) + iXLen %2) ret %a } @@ -1199,10 +1201,10 @@ declare @llvm.riscv.vfmin.mask.nxv1f64.f64( , double, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1214,7 +1216,7 @@ entry: %1, double %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vfmin.nxv2f64.f64( , double, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv2f64.f64( %0, double %1, - i64 %2) + iXLen %2) ret %a } @@ -1244,10 +1246,10 @@ declare @llvm.riscv.vfmin.mask.nxv2f64.f64( , double, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1259,7 +1261,7 @@ entry: %1, double %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1267,9 +1269,9 @@ entry: declare @llvm.riscv.vfmin.nxv4f64.f64( , double, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1279,7 +1281,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv4f64.f64( %0, double %1, - i64 %2) + iXLen %2) ret %a } @@ -1289,10 +1291,10 @@ declare @llvm.riscv.vfmin.mask.nxv4f64.f64( , double, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1304,7 +1306,7 @@ entry: %1, double %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1312,9 +1314,9 @@ entry: declare @llvm.riscv.vfmin.nxv8f64.f64( , double, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1324,7 +1326,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv8f64.f64( %0, double %1, - i64 %2) + iXLen %2) ret %a } @@ -1334,10 +1336,10 @@ declare @llvm.riscv.vfmin.mask.nxv8f64.f64( , double, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1349,7 +1351,7 @@ entry: %1, double %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll deleted file mode 100644 index 2a5fb2896aa56d66d35b4cdbf99f2b061492c02b..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll +++ /dev/null @@ -1,1106 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfmsac.nxv1f16.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv1f16.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv2f16.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv2f16.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv4f16.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv4f16.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv8f16.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv8f16.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv16f16.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv16f16.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv1f32.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv2f32.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv4f32.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv8f32.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv1f64.nxv1f64( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - 
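; Aside: the vfmin tests renamed above (and the vfmsac rename at the end of this
; hunk) switch to sed-parameterized RUN lines so a single file serves both XLEN
; widths. A minimal sketch of the equivalent manual invocation, with the file
; path assumed from the rename header:
;   sed 's/iXLen/i32/g' vfmin.ll | llc -mtriple=riscv32 -mattr=+v,+zfh \
;     -verify-machineinstrs -target-abi=ilp32d | FileCheck vfmin.ll
;   sed 's/iXLen/i64/g' vfmin.ll | llc -mtriple=riscv64 -mattr=+v,+zfh \
;     -verify-machineinstrs -target-abi=lp64d | FileCheck vfmin.ll
; The substitution rewrites every iXLen placeholder (e.g. "iXLen %4, iXLen 1",
; the vl and policy operands of the masked vfmin intrinsics) to i32 or i64
; before llc sees the IR, and the shared CHECK lines still match because the
; vector code emitted for these intrinsics is the same on riscv32 and riscv64.
; By contrast, the vfmsac masked intrinsics being deleted here take only a vl
; operand (a bare i64 %4, with tu, mu in the vsetvli), not the extra trailing
; policy immediate carried by the masked vfmin declarations.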
-declare @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv2f64.nxv2f64( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv4f64.nxv4f64( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv1f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv1f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv1f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv1f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv2f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e16, mf2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv2f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv2f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv2f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv4f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv4f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv4f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv4f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv8f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv8f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv8f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv8f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv16f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv16f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv16f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv16f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv1f32.f32( - , - float, - , - i64); - -define 
@intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv1f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv1f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv1f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv2f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv2f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv2f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv2f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv4f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv4f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv4f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv4f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv8f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv8f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv8f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret 
-entry: - %a = call @llvm.riscv.vfmsac.mask.nxv8f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv1f64.f64( - , - double, - , - i64); - -define @intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv1f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv1f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv1f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv2f64.f64( - , - double, - , - i64); - -define @intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv2f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv2f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv2f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv4f64.f64( - , - double, - , - i64); - -define @intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv4f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv4f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv4f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac.ll similarity index 89% rename from llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfmsac.ll index c8407dfe64730a61f787c6041a02a38c79be3175..cf9df7550fcc7e069d8345c95f5998d76b4a202d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck 
%s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -156,7 +158,7 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
-    i32
%3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfmsac.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfmsac.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfmsac.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfmsac.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define 
@intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfmsac.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfmsac.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfmsac.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfmsac.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfmsac.nxv1f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfmsac.mask.nxv1f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfmsac.nxv2f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -616,7 +618,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfmsac.mask.nxv2f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfmsac.nxv4f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -662,7 +664,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } 
@@ -672,9 +674,9 @@ declare @llvm.riscv.vfmsac.mask.nxv4f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -686,7 +688,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfmsac.nxv8f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -708,7 +710,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfmsac.mask.nxv8f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -732,7 +734,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfmsac.nxv16f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -754,7 +756,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfmsac.mask.nxv16f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -778,7 +780,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vfmsac.nxv1f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfmsac.mask.nxv1f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -833,9 +835,9 @@ declare @llvm.riscv.vfmsac.nxv2f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32( %0, 
float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -846,7 +848,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -856,9 +858,9 @@ declare @llvm.riscv.vfmsac.mask.nxv2f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -870,7 +872,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -879,9 +881,9 @@ declare @llvm.riscv.vfmsac.nxv4f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -892,7 +894,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -902,9 +904,9 @@ declare @llvm.riscv.vfmsac.mask.nxv4f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -916,7 +918,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -925,9 +927,9 @@ declare @llvm.riscv.vfmsac.nxv8f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -938,7 +940,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vfmsac.mask.nxv8f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -962,7 +964,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -971,9 +973,9 @@ declare @llvm.riscv.vfmsac.nxv1f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -984,7 +986,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -994,9 +996,9 @@ declare @llvm.riscv.vfmsac.mask.nxv1f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -1008,7 +1010,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1017,9 +1019,9 @@ declare @llvm.riscv.vfmsac.nxv2f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1030,7 +1032,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1040,9 +1042,9 @@ declare @llvm.riscv.vfmsac.mask.nxv2f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1054,7 +1056,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1063,9 +1065,9 @@ declare @llvm.riscv.vfmsac.nxv4f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1076,7 +1078,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1086,9 +1088,9 @@ declare @llvm.riscv.vfmsac.mask.nxv4f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1100,7 +1102,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll deleted file mode 100644 index 70efc0da21f5a7c40b01003aa654f619e10cbf3b..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll +++ /dev/null @@ -1,1106 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfmsub.nxv1f16.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv1f16.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16( - 
%0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv2f16.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv2f16.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv4f16.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv4f16.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv8f16.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv8f16.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv16f16.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv16f16.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv1f32.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv2f32.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv2f32.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv4f32.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv8f32.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32( - , - , 
- , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv1f64.nxv1f64( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv2f64.nxv2f64( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv4f64.nxv4f64( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv1f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9 
-; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv1f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv1f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv1f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv2f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv2f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv2f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv2f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv4f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv4f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv4f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv4f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv8f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv8f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv8f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv8f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv16f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { -; 
CHECK-LABEL: intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv16f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv16f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv16f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv1f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv1f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv1f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv1f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv2f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv2f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv2f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv2f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv4f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv4f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv4f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv4f32.f32( - %0, - float %1, - 
%2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv8f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv8f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv8f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv8f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv1f64.f64( - , - double, - , - i64); - -define @intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv1f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv1f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv1f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv2f64.f64( - , - double, - , - i64); - -define @intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv2f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv2f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv2f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv4f64.f64( - , - double, - , - i64); - -define @intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv4f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv4f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64: -; CHECK: # 
%bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmsub.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub.ll
similarity index 89%
rename from llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfmsub.ll
index 620c3dcb1025af0739d76103253383e600a6c674..d071893ceb08506b4073e8178a1e5ce21f9f45ad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
, - i32); + iXLen); -define @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfmsub.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfmsub.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfmsub.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfmsub.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, 
%2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfmsub.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfmsub.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfmsub.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -456,7 +458,7 @@ entry: 
%1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfmsub.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfmsub.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfmsub.nxv1f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfmsub.mask.nxv1f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfmsub.nxv2f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -616,7 +618,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfmsub.mask.nxv2f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, 
%2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfmsub.nxv4f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -662,7 +664,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfmsub.mask.nxv4f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -686,7 +688,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfmsub.nxv8f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -708,7 +710,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfmsub.mask.nxv8f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -732,7 +734,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfmsub.nxv16f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -754,7 +756,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfmsub.mask.nxv16f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -778,7 +780,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vfmsub.nxv1f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfmsub.mask.nxv1f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -833,9 +835,9 @@ declare @llvm.riscv.vfmsub.nxv2f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -846,7 +848,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -856,9 +858,9 @@ declare @llvm.riscv.vfmsub.mask.nxv2f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -870,7 +872,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -879,9 +881,9 @@ declare @llvm.riscv.vfmsub.nxv4f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -892,7 +894,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -902,9 +904,9 @@ declare @llvm.riscv.vfmsub.mask.nxv4f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -916,7 +918,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -925,9 +927,9 @@ declare @llvm.riscv.vfmsub.nxv8f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -938,7 +940,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vfmsub.mask.nxv8f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -962,7 +964,7 @@ 
entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x float> %a
 }
@@ -971,9 +973,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -984,7 +986,7 @@ entry:
     <vscale x 1 x double> %0,
     double %1,
     <vscale x 1 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -994,9 +996,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64(
   double,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -1008,7 +1010,7 @@ entry:
     double %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x double> %a
 }
@@ -1017,9 +1019,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -1030,7 +1032,7 @@ entry:
     <vscale x 2 x double> %0,
     double %1,
     <vscale x 2 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -1040,9 +1042,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64(
   double,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -1054,7 +1056,7 @@ entry:
     double %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x double> %a
 }
@@ -1063,9 +1065,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -1076,7 +1078,7 @@ entry:
     <vscale x 4 x double> %0,
     double %1,
     <vscale x 4 x double> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -1086,9 +1088,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64(
   double,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -1100,7 +1102,7 @@ entry:
     double %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll
deleted file mode 100644
index 08aa64b6de7fc842fb019e7a89384e1c73e14cf6..0000000000000000000000000000000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll
+++ /dev/null
@@ -1,1355 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  
i64); - -define @intrinsic_vfmul_vv_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv1f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv1f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv2f16( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv2f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv2f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv4f16( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv4f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv4f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv8f16( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv8f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv8f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmul.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv16f16( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vfmul_vv_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv16f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv16f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmul.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv32f16( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv32f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv32f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv1f32( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv1f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv1f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv2f32( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv2f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv2f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv4f32( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv4f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv4f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmul.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv8f32( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv8f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv8f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmul.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv16f32( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv16f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv16f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv1f64( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv1f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv1f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv2f64( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmul.vv 
v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv2f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv2f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmul.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv4f64( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv4f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv4f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfmul.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv8f64( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv8f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv8f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv8f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv1f16.f16( - , - half, - i64); - -define @intrinsic_vfmul_vf_nxv1f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv1f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv1f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv2f16.f16( - , - half, - i64); - -define @intrinsic_vfmul_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = 
call @llvm.riscv.vfmul.nxv2f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv2f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv4f16.f16( - , - half, - i64); - -define @intrinsic_vfmul_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv4f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv4f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfmul_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv8f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfmul_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv16f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv32f16.f16( - , - half, - i64); - -define @intrinsic_vfmul_vf_nxv32f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - 
%a = call @llvm.riscv.vfmul.nxv32f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv32f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfmul.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv1f32.f32( - , - float, - i64); - -define @intrinsic_vfmul_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfmul_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfmul_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv4f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv4f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv8f32.f32( - , - float, - i64); - -define @intrinsic_vfmul_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv8f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv8f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv16f32.f32( - , - float, - i64); - -define @intrinsic_vfmul_vf_nxv16f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv16f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv16f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfmul.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv1f64.f64( - , - double, - i64); - -define @intrinsic_vfmul_vf_nxv1f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv1f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv1f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv2f64.f64( - , - double, - i64); - -define @intrinsic_vfmul_vf_nxv2f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv2f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv2f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv2f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv4f64.f64( - , - double, - i64); - -define @intrinsic_vfmul_vf_nxv4f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu 
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfmul_mask_vf_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    double %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfmul_vf_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfmul_mask_vf_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfmul.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfmul.ll
index 50ebccd92e64cb9dbe19b51f5a282e6209bcdd4b..0f4c738025617b4375ef316480d4d461ef6210d5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define @intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16( 
%0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -61,7 +63,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv2f16.nxv2f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -71,10 +73,10 @@ declare @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -86,7 +88,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -94,9 +96,9 @@ entry: declare @llvm.riscv.vfmul.nxv4f16.nxv4f16( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -106,7 +108,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv4f16.nxv4f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -116,10 +118,10 @@ declare @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfmul.nxv8f16.nxv8f16( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv8f16.nxv8f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfmul.nxv16f16.nxv16f16( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv16f16.nxv16f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define 
@intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -229,9 +231,9 @@ entry: declare @llvm.riscv.vfmul.nxv32f16.nxv32f16( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv32f16.nxv32f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -251,10 +253,10 @@ declare @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -267,7 +269,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -275,9 +277,9 @@ entry: declare @llvm.riscv.vfmul.nxv1f32.nxv1f32( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -287,7 +289,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv1f32.nxv1f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -297,10 +299,10 @@ declare @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -312,7 +314,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -320,9 +322,9 @@ entry: declare @llvm.riscv.vfmul.nxv2f32.nxv2f32( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -332,7 +334,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv2f32.nxv2f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -342,10 +344,10 @@ declare @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -357,7 +359,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -365,9 +367,9 @@ entry: declare 
@llvm.riscv.vfmul.nxv4f32.nxv4f32( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -377,7 +379,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv4f32.nxv4f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -387,10 +389,10 @@ declare @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -402,7 +404,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -410,9 +412,9 @@ entry: declare @llvm.riscv.vfmul.nxv8f32.nxv8f32( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -422,7 +424,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv8f32.nxv8f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -432,10 +434,10 @@ declare @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -447,7 +449,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -455,9 +457,9 @@ entry: declare @llvm.riscv.vfmul.nxv16f32.nxv16f32( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -467,7 +469,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv16f32.nxv16f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -477,10 +479,10 @@ declare @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -493,7 +495,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -501,9 +503,9 @@ entry: declare @llvm.riscv.vfmul.nxv1f64.nxv1f64( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -513,7 +515,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv1f64.nxv1f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -523,10 
+525,10 @@ declare @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -538,7 +540,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -546,9 +548,9 @@ entry: declare @llvm.riscv.vfmul.nxv2f64.nxv2f64( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -558,7 +560,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv2f64.nxv2f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -568,10 +570,10 @@ declare @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -583,7 +585,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -591,9 +593,9 @@ entry: declare @llvm.riscv.vfmul.nxv4f64.nxv4f64( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -603,7 +605,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv4f64.nxv4f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -613,10 +615,10 @@ declare @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -628,7 +630,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -636,9 +638,9 @@ entry: declare @llvm.riscv.vfmul.nxv8f64.nxv8f64( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -648,7 +650,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv8f64.nxv8f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -658,10 +660,10 @@ declare @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -674,7 +676,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) 
ret %a } @@ -682,9 +684,9 @@ entry: declare @llvm.riscv.vfmul.nxv1f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -694,7 +696,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv1f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -704,10 +706,10 @@ declare @llvm.riscv.vfmul.mask.nxv1f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -719,7 +721,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -727,9 +729,9 @@ entry: declare @llvm.riscv.vfmul.nxv2f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -739,7 +741,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv2f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -749,10 +751,10 @@ declare @llvm.riscv.vfmul.mask.nxv2f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -764,7 +766,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -772,9 +774,9 @@ entry: declare @llvm.riscv.vfmul.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -784,7 +786,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -794,10 +796,10 @@ declare @llvm.riscv.vfmul.mask.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -809,7 +811,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -817,9 +819,9 @@ entry: declare @llvm.riscv.vfmul.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -829,7 +831,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv8f16.f16( %0, half %1, - 
i32 %2) + iXLen %2) ret %a } @@ -839,10 +841,10 @@ declare @llvm.riscv.vfmul.mask.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -854,7 +856,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -862,9 +864,9 @@ entry: declare @llvm.riscv.vfmul.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -874,7 +876,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -884,10 +886,10 @@ declare @llvm.riscv.vfmul.mask.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -899,7 +901,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -907,9 +909,9 @@ entry: declare @llvm.riscv.vfmul.nxv32f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -919,7 +921,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv32f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -929,10 +931,10 @@ declare @llvm.riscv.vfmul.mask.nxv32f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -944,7 +946,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -952,9 +954,9 @@ entry: declare @llvm.riscv.vfmul.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -964,7 +966,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -974,10 +976,10 @@ declare @llvm.riscv.vfmul.mask.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a0, e32, mf2, ta, mu @@ -989,7 +991,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -997,9 +999,9 @@ entry: declare @llvm.riscv.vfmul.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1009,7 +1011,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1019,10 +1021,10 @@ declare @llvm.riscv.vfmul.mask.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1034,7 +1036,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1042,9 +1044,9 @@ entry: declare @llvm.riscv.vfmul.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1054,7 +1056,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1064,10 +1066,10 @@ declare @llvm.riscv.vfmul.mask.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1079,7 +1081,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1087,9 +1089,9 @@ entry: declare @llvm.riscv.vfmul.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1099,7 +1101,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1109,10 +1111,10 @@ declare @llvm.riscv.vfmul.mask.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1124,7 +1126,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1132,9 +1134,9 @@ entry: declare @llvm.riscv.vfmul.nxv16f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32: 
; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1144,7 +1146,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv16f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1154,10 +1156,10 @@ declare @llvm.riscv.vfmul.mask.nxv16f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1169,7 +1171,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1177,9 +1179,9 @@ entry: declare @llvm.riscv.vfmul.nxv1f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1189,7 +1191,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv1f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1199,10 +1201,10 @@ declare @llvm.riscv.vfmul.mask.nxv1f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1214,7 +1216,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vfmul.nxv2f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv2f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1244,10 +1246,10 @@ declare @llvm.riscv.vfmul.mask.nxv2f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1259,7 +1261,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1267,9 +1269,9 @@ entry: declare @llvm.riscv.vfmul.nxv4f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1279,7 +1281,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv4f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1289,10 +1291,10 @@ declare @llvm.riscv.vfmul.mask.nxv4f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { 
+define @intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1304,7 +1306,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1312,9 +1314,9 @@ entry: declare @llvm.riscv.vfmul.nxv8f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1324,7 +1326,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv8f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1334,10 +1336,10 @@ declare @llvm.riscv.vfmul.mask.nxv8f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1349,7 +1351,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll deleted file mode 100644 index ee619309b9e3c1667f8407c1bde06f4fdc84e5b4..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll +++ /dev/null @@ -1,197 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+d,+v,+zfh -target-abi lp64d -verify-machineinstrs < %s | FileCheck %s - -declare @llvm.riscv.vfmv.s.f.nxv1f16(, half, i64) - -define @intrinsic_vfmv.s.f_f_nxv1f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv1f16( %0, half %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv2f16(, half, i64) - -define @intrinsic_vfmv.s.f_f_nxv2f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv2f16( %0, half %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv4f16(, half, i64) - -define @intrinsic_vfmv.s.f_f_nxv4f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv4f16( %0, half %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv8f16(, half, i64) - -define @intrinsic_vfmv.s.f_f_nxv8f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv8f16( %0, half %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv16f16(, half, i64) - -define @intrinsic_vfmv.s.f_f_nxv16f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vfmv.s.f_f_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv16f16( %0, half %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv32f16(, half, i64) - -define @intrinsic_vfmv.s.f_f_nxv32f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv32f16( %0, half %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv1f32(, float, i64) - -define @intrinsic_vfmv.s.f_f_nxv1f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv1f32( %0, float %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv2f32(, float, i64) - -define @intrinsic_vfmv.s.f_f_nxv2f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv2f32( %0, float %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv4f32(, float, i64) - -define @intrinsic_vfmv.s.f_f_nxv4f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv4f32( %0, float %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv8f32(, float, i64) - -define @intrinsic_vfmv.s.f_f_nxv8f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv8f32( %0, float %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv16f32(, float, i64) - -define @intrinsic_vfmv.s.f_f_nxv16f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv16f32( %0, float %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv1f64(, double, i64) - -define @intrinsic_vfmv.s.f_f_nxv1f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv1f64( %0, double %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv2f64(, double, i64) - -define @intrinsic_vfmv.s.f_f_nxv2f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv2f64( %0, double %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv4f64(, double, i64) - -define @intrinsic_vfmv.s.f_f_nxv4f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f64: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv4f64( %0, double %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv8f64(, double, i64) - -define @intrinsic_vfmv.s.f_f_nxv8f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv8f64( %0, double %1, i64 %2) - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll similarity index 74% rename from llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll index 4d47c000788fb8eac5d401024d86e212ee4bbafd..8464dc2f6299758b1da59d10c3ff7a1a77b1cab9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll @@ -1,197 +1,200 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+d,+v,+zfh -target-abi ilp32d -verify-machineinstrs < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmv.s.f.nxv1f16(, half, i32) +declare @llvm.riscv.vfmv.s.f.nxv1f16(, half, iXLen) -define @intrinsic_vfmv.s.f_f_nxv1f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv1f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv1f16( %0, half %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv1f16( %0, half %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv2f16(, half, i32) +declare @llvm.riscv.vfmv.s.f.nxv2f16(, half, iXLen) -define @intrinsic_vfmv.s.f_f_nxv2f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv2f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv2f16( %0, half %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv2f16( %0, half %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv4f16(, half, i32) +declare @llvm.riscv.vfmv.s.f.nxv4f16(, half, iXLen) -define @intrinsic_vfmv.s.f_f_nxv4f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv4f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv4f16( %0, half %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv4f16( %0, half %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv8f16(, half, i32) +declare @llvm.riscv.vfmv.s.f.nxv8f16(, half, iXLen) -define @intrinsic_vfmv.s.f_f_nxv8f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv8f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; 
CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv8f16( %0, half %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv8f16( %0, half %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv16f16(, half, i32) +declare @llvm.riscv.vfmv.s.f.nxv16f16(, half, iXLen) -define @intrinsic_vfmv.s.f_f_nxv16f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv16f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv16f16( %0, half %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv16f16( %0, half %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv32f16(, half, i32) +declare @llvm.riscv.vfmv.s.f.nxv32f16(, half, iXLen) -define @intrinsic_vfmv.s.f_f_nxv32f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv32f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv32f16( %0, half %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv32f16( %0, half %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv1f32(, float, i32) +declare @llvm.riscv.vfmv.s.f.nxv1f32(, float, iXLen) -define @intrinsic_vfmv.s.f_f_nxv1f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv1f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv1f32( %0, float %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv1f32( %0, float %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv2f32(, float, i32) +declare @llvm.riscv.vfmv.s.f.nxv2f32(, float, iXLen) -define @intrinsic_vfmv.s.f_f_nxv2f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv2f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv2f32( %0, float %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv2f32( %0, float %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv4f32(, float, i32) +declare @llvm.riscv.vfmv.s.f.nxv4f32(, float, iXLen) -define @intrinsic_vfmv.s.f_f_nxv4f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv4f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv4f32( %0, float %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv4f32( %0, float %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv8f32(, float, i32) +declare @llvm.riscv.vfmv.s.f.nxv8f32(, float, iXLen) -define @intrinsic_vfmv.s.f_f_nxv8f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv8f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv8f32( %0, float %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv8f32( %0, float %1, iXLen 
%2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv16f32(, float, i32) +declare @llvm.riscv.vfmv.s.f.nxv16f32(, float, iXLen) -define @intrinsic_vfmv.s.f_f_nxv16f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv16f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv16f32( %0, float %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv16f32( %0, float %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv1f64(, double, i32) +declare @llvm.riscv.vfmv.s.f.nxv1f64(, double, iXLen) -define @intrinsic_vfmv.s.f_f_nxv1f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv1f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv1f64( %0, double %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv1f64( %0, double %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv2f64(, double, i32) +declare @llvm.riscv.vfmv.s.f.nxv2f64(, double, iXLen) -define @intrinsic_vfmv.s.f_f_nxv2f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv2f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv2f64( %0, double %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv2f64( %0, double %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv4f64(, double, i32) +declare @llvm.riscv.vfmv.s.f.nxv4f64(, double, iXLen) -define @intrinsic_vfmv.s.f_f_nxv4f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv4f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv4f64( %0, double %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv4f64( %0, double %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv8f64(, double, i32) +declare @llvm.riscv.vfmv.s.f.nxv8f64(, double, iXLen) -define @intrinsic_vfmv.s.f_f_nxv8f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv8f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv8f64( %0, double %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv8f64( %0, double %1, iXLen %2) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll deleted file mode 100644 index 1c4d9f6e89dc40c30e8b9f9ef83574acd1833e05..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll +++ /dev/null @@ -1,482 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -target-abi lp64d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfmv.v.f.nxv1f16( - half, - i64); - -define @intrinsic_vfmv.v.f_f_nxv1f16(half %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv1f16( - half %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv2f16( - half, - i64); - -define @intrinsic_vfmv.v.f_f_nxv2f16(half %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv2f16( - half %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv4f16( - half, - i64); - -define @intrinsic_vfmv.v.f_f_nxv4f16(half %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv4f16( - half %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv8f16( - half, - i64); - -define @intrinsic_vfmv.v.f_f_nxv8f16(half %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv8f16( - half %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv16f16( - half, - i64); - -define @intrinsic_vfmv.v.f_f_nxv16f16(half %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv16f16( - half %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv32f16( - half, - i64); - -define @intrinsic_vfmv.v.f_f_nxv32f16(half %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv32f16( - half %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv1f32( - float, - i64); - -define @intrinsic_vfmv.v.f_f_nxv1f32(float %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv1f32( - float %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv2f32( - float, - i64); - -define @intrinsic_vfmv.v.f_f_nxv2f32(float %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv2f32( - float %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv4f32( - float, - i64); - -define @intrinsic_vfmv.v.f_f_nxv4f32(float %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv4f32( - float %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv8f32( - float, - i64); - -define @intrinsic_vfmv.v.f_f_nxv8f32(float %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; 
CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv8f32( - float %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv16f32( - float, - i64); - -define @intrinsic_vfmv.v.f_f_nxv16f32(float %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv16f32( - float %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv1f64( - double, - i64); - -define @intrinsic_vfmv.v.f_f_nxv1f64(double %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv1f64( - double %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv2f64( - double, - i64); - -define @intrinsic_vfmv.v.f_f_nxv2f64(double %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv2f64( - double %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv4f64( - double, - i64); - -define @intrinsic_vfmv.v.f_f_nxv4f64(double %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv4f64( - double %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv8f64( - double, - i64); - -define @intrinsic_vfmv.v.f_f_nxv8f64(double %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv8f64( - double %0, - i64 %1) - - ret %a -} - -define @intrinsic_vfmv.v.f_zero_nxv1f16(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv1f16( - half 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv2f16(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv2f16( - half 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv4f16(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv4f16( - half 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv8f16(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv8f16( - half 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv16f16(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, 
mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv16f16( - half 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv32f16(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv32f16( - half 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv1f32(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv1f32( - float 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv2f32(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv2f32( - float 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv4f32(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv4f32( - float 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv8f32(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv8f32( - float 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv16f32(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv16f32( - float 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv1f64(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv1f64( - double 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv2f64(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv2f64( - double 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv4f64(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv4f64( - double 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv8f64(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv8f64( - double 0.0, - i64 %0) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll similarity index 82% rename from llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll rename to 
llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll index b4acc57dcd81e68ca74a38eccc431f8963131ec4..6e0613e3e49ba87dd2a749ed3fed9297dd5dc509 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -target-abi ilp32d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfmv.v.f.nxv1f16( half, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv1f16(half %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv1f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -14,16 +16,16 @@ define @intrinsic_vfmv.v.f_f_nxv1f16(half %0, i32 %1) nounwi entry: %a = call @llvm.riscv.vfmv.v.f.nxv1f16( half %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv2f16( half, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv2f16(half %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv2f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -32,16 +34,16 @@ define @intrinsic_vfmv.v.f_f_nxv2f16(half %0, i32 %1) nounwi entry: %a = call @llvm.riscv.vfmv.v.f.nxv2f16( half %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv4f16( half, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv4f16(half %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv4f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -50,16 +52,16 @@ define @intrinsic_vfmv.v.f_f_nxv4f16(half %0, i32 %1) nounwi entry: %a = call @llvm.riscv.vfmv.v.f.nxv4f16( half %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv8f16( half, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv8f16(half %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv8f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -68,16 +70,16 @@ define @intrinsic_vfmv.v.f_f_nxv8f16(half %0, i32 %1) nounwi entry: %a = call @llvm.riscv.vfmv.v.f.nxv8f16( half %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv16f16( half, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv16f16(half %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv16f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -86,16 +88,16 @@ define @intrinsic_vfmv.v.f_f_nxv16f16(half %0, i32 %1) noun entry: %a = call @llvm.riscv.vfmv.v.f.nxv16f16( half %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv32f16( half, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv32f16(half %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv32f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -104,16 +106,16 @@ define @intrinsic_vfmv.v.f_f_nxv32f16(half %0, i32 %1) noun entry: %a 
= call @llvm.riscv.vfmv.v.f.nxv32f16( half %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv1f32( float, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv1f32(float %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv1f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -122,16 +124,16 @@ define @intrinsic_vfmv.v.f_f_nxv1f32(float %0, i32 %1) noun entry: %a = call @llvm.riscv.vfmv.v.f.nxv1f32( float %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv2f32( float, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv2f32(float %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv2f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -140,16 +142,16 @@ define @intrinsic_vfmv.v.f_f_nxv2f32(float %0, i32 %1) noun entry: %a = call @llvm.riscv.vfmv.v.f.nxv2f32( float %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv4f32( float, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv4f32(float %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv4f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -158,16 +160,16 @@ define @intrinsic_vfmv.v.f_f_nxv4f32(float %0, i32 %1) noun entry: %a = call @llvm.riscv.vfmv.v.f.nxv4f32( float %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv8f32( float, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv8f32(float %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv8f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -176,16 +178,16 @@ define @intrinsic_vfmv.v.f_f_nxv8f32(float %0, i32 %1) noun entry: %a = call @llvm.riscv.vfmv.v.f.nxv8f32( float %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv16f32( float, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv16f32(float %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv16f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -194,16 +196,16 @@ define @intrinsic_vfmv.v.f_f_nxv16f32(float %0, i32 %1) no entry: %a = call @llvm.riscv.vfmv.v.f.nxv16f32( float %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv1f64( double, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv1f64(double %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv1f64(double %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -212,16 +214,16 @@ define @intrinsic_vfmv.v.f_f_nxv1f64(double %0, i32 %1) no entry: %a = call @llvm.riscv.vfmv.v.f.nxv1f64( double %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv2f64( double, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv2f64(double %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv2f64(double %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -230,16 +232,16 @@ define @intrinsic_vfmv.v.f_f_nxv2f64(double %0, i32 %1) no entry: %a = call @llvm.riscv.vfmv.v.f.nxv2f64( double %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv4f64( 
double, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv4f64(double %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv4f64(double %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -248,16 +250,16 @@ define @intrinsic_vfmv.v.f_f_nxv4f64(double %0, i32 %1) no entry: %a = call @llvm.riscv.vfmv.v.f.nxv4f64( double %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv8f64( double, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv8f64(double %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv8f64(double %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -266,12 +268,12 @@ define @intrinsic_vfmv.v.f_f_nxv8f64(double %0, i32 %1) no entry: %a = call @llvm.riscv.vfmv.v.f.nxv8f64( double %0, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vfmv.v.f_zero_nxv1f16(i32 %0) nounwind { +define @intrinsic_vfmv.v.f_zero_nxv1f16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -280,12 +282,12 @@ define @intrinsic_vfmv.v.f_zero_nxv1f16(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv1f16( half 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv2f16(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv2f16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -294,12 +296,12 @@ define @intrinsic_vmv.v.i_zero_nxv2f16(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv2f16( half 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv4f16(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv4f16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -308,12 +310,12 @@ define @intrinsic_vmv.v.i_zero_nxv4f16(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv4f16( half 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv8f16(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv8f16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -322,12 +324,12 @@ define @intrinsic_vmv.v.i_zero_nxv8f16(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv8f16( half 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv16f16(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv16f16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -336,12 +338,12 @@ define @intrinsic_vmv.v.i_zero_nxv16f16(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv16f16( half 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv32f16(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv32f16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -350,12 +352,12 @@ define @intrinsic_vmv.v.i_zero_nxv32f16(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv32f16( half 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv1f32(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv1f32(iXLen %0) nounwind { ; 
CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -364,12 +366,12 @@ define @intrinsic_vmv.v.i_zero_nxv1f32(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv1f32( float 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv2f32(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv2f32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -378,12 +380,12 @@ define @intrinsic_vmv.v.i_zero_nxv2f32(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv2f32( float 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv4f32(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv4f32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -392,12 +394,12 @@ define @intrinsic_vmv.v.i_zero_nxv4f32(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv4f32( float 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv8f32(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv8f32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -406,12 +408,12 @@ define @intrinsic_vmv.v.i_zero_nxv8f32(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv8f32( float 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv16f32(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv16f32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -420,12 +422,12 @@ define @intrinsic_vmv.v.i_zero_nxv16f32(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv16f32( float 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv1f64(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv1f64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -434,12 +436,12 @@ define @intrinsic_vmv.v.i_zero_nxv1f64(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv1f64( double 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv2f64(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv2f64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -448,12 +450,12 @@ define @intrinsic_vmv.v.i_zero_nxv2f64(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv2f64( double 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv4f64(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv4f64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -462,12 +464,12 @@ define @intrinsic_vmv.v.i_zero_nxv4f64(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv4f64( double 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv8f64(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv8f64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -476,7 +478,7 @@ define @intrinsic_vmv.v.i_zero_nxv8f64(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv8f64( double 0.0, 
- i32 %0) + iXLen %0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll deleted file mode 100644 index 2e35f75eb89c0f897d589dc5fe38480c8f273af7..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll +++ /dev/null @@ -1,380 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32( - , - i64); - -define @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32( - , - i64); - -define @intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32( - , - i64); - -define @intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32( - , - i64); - -define @intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32( - , - i64); - -define @intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64( - , - i64); - -define @intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64( - , - i64); - -define @intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare 
@llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64(
-  ,
-  i64);
-
-define @intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64( %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vfncvt.f.f.w v12, v8
-; CHECK-NEXT: vmv.v.v v8, v12
-; CHECK-NEXT: ret
-entry:
-  %a = call @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64(
-    %0,
-    i64 %1)
-
-  ret %a
-}
-
-declare @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64(
-  ,
-  ,
-  ,
-  i64,
-  i64);
-
-define @intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64( %0, %1, %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vfncvt.f.f.w v8, v12, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64(
-    %0,
-    %1,
-    %2,
-    i64 %3, i64 1)
-
-  ret %a
-}
-
-declare @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64(
-  ,
-  i64);
-
-define @intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64( %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vfncvt.f.f.w v16, v8
-; CHECK-NEXT: vmv.v.v v8, v16
-; CHECK-NEXT: ret
-entry:
-  %a = call @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64(
-    %0,
-    i64 %1)
-
-  ret %a
-}
-
-declare @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64(
-  ,
-  ,
-  ,
-  i64,
-  i64);
-
-define @intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64( %0, %1, %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vfncvt.f.f.w v8, v16, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64(
-    %0,
-    %1,
-    %2,
-    i64 %3, i64 1)
-
-  ret %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
index 014ff81bada19a69109c82f7944afd4978de708e..e757261e7363d0dfe9f1c4a4aecb85f85245bc7c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
   ,
-  i32);
+  iXLen);
-define @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32( %0, i32 %1) nounwind {
+define @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32( %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -15,7 +17,7 @@ define @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32(
   @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
     %0,
-    i32 %1)
+    iXLen %1)

   ret %a
 }
@@ -24,10 +26,10 @@ declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
   ,
   ,
   ,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
-define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, %2, i32 %3) nounwind {
+define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, %2, iXLen %3) nounwind {
 ; CHECK-LABEL:
intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -38,16 +40,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -57,7 +59,7 @@ define @intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32( @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -66,10 +68,10 @@ declare @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -80,16 +82,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32( @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32( @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32( , - i32); + iXLen); -define @intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, 
m4, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32( @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64( @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64( @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64( @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64: ; CHECK: # %bb.0: # %entry 
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64( @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -374,7 +376,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll deleted file mode 100644 index d8d55df8e4589eb944aeb9f50b5726b640f531ba..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll +++ /dev/null @@ -1,380 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32( - , - i64); - -define @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32( - , - i64); - -define @intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32( - , - 
i64); - -define @intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32( - , - i64); - -define @intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32( - , - i64); - -define @intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64( - , - i64); - -define @intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: 
vfncvt.f.x.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64( - , - i64); - -define @intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64( - , - i64); - -define @intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64( - , - i64); - -define @intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll index ab5f66bf692faa1f6be6dd5429239d1838cca30a..eedc7c163399921c4a21583d87c816cda161986d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh 
-verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
   ,
-  i32);
+  iXLen);
-define @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32( %0, i32 %1) nounwind {
+define @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32( %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -15,7 +17,7 @@ define @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32(
   @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
     %0,
-    i32 %1)
+    iXLen %1)

   ret %a
 }
@@ -24,10 +26,10 @@ declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
   ,
   ,
   ,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
-define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, %2, i32 %3) nounwind {
+define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -38,16 +40,16 @@ entry:
     %0,
     %1,
     %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)

   ret %a
 }
 declare @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32(
   ,
-  i32);
+  iXLen);
-define @intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32( %0, i32 %1) nounwind {
+define @intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32( %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -57,7 +59,7 @@ define @intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32(
   @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32(
     %0,
-    i32 %1)
+    iXLen %1)

   ret %a
 }
@@ -66,10 +68,10 @@ declare @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32(
   ,
   ,
   ,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
-define @intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32( %0, %1, %2, i32 %3) nounwind {
+define @intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32( %0, %1, %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -80,16 +82,16 @@ entry:
     %0,
     %1,
     %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)

   ret %a
 }
 declare @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32(
   ,
-  i32);
+  iXLen);
-define @intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32( %0, i32 %1) nounwind {
+define @intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32( %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -99,7 +101,7 @@ define @intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32(
   @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32(
     %0,
-    i32 %1)
+    iXLen %1)

   ret %a
 }
@@ -108,10 +110,10 @@ declare @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32(
   ,
   ,
   ,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
-define @intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32( %0, %1, %2, i32 %3) nounwind {
+define @intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32( %0, %1, %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -122,16 +124,16 @@ entry:
     %0,
     %1,
     %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)

   ret %a
 }
 declare @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32(
   ,
-  i32);
+  iXLen);
-define @intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32( %0, i32 %1) nounwind {
+define @intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32( %0, iXLen %1) nounwind {
 ; CHECK-LABEL:
intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32( @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32( , - i32); + iXLen); -define @intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32( @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64( @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64( @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64( %0, %1, 
%2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64( @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64( @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -374,7 +376,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll deleted file mode 100644 index 32a23932b074b8d088280da7631d727a7f2b0769..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll +++ /dev/null @@ -1,380 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32( - , - i64); - -define @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v8, v9, v0.t -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32( - , - i64); - -define @intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32( - , - i64); - -define @intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32( - , - i64); - -define @intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32( - , - i64); - -define @intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32( - , - , - , - i64, - i64); - -define 
@intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64( - , - i64); - -define @intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64( - , - i64); - -define @intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64( - , - i64); - -define @intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64( - , - i64); - -define @intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v16, v8 
-; CHECK-NEXT: vmv.v.v v8, v16
-; CHECK-NEXT: ret
-entry:
-  %a = call @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64(
-    %0,
-    i64 %1)
-
-  ret %a
-}
-
-declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64(
-  ,
-  ,
-  ,
-  i64,
-  i64);
-
-define @intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64( %0, %1, %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vfncvt.f.xu.w v8, v16, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64(
-    %0,
-    %1,
-    %2,
-    i64 %3, i64 1)
-
-  ret %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll
index 4835d4e5c5916bdf5e19903b9cef2d31f1a550bc..e6842b749492cd7b5380fa7dba5a051a04fd55bf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
   ,
-  i32);
+  iXLen);
-define @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32( %0, i32 %1) nounwind {
+define @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32( %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -15,7 +17,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32(
   @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
     %0,
-    i32 %1)
+    iXLen %1)

   ret %a
 }
@@ -24,10 +26,10 @@ declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32(
   ,
   ,
   ,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
-define @intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32( %0, %1, %2, i32 %3) nounwind {
+define @intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32( %0, %1, %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -38,16 +40,16 @@ entry:
     %0,
     %1,
     %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)

   ret %a
 }
 declare @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32(
   ,
-  i32);
+  iXLen);
-define @intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32( %0, i32 %1) nounwind {
+define @intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32( %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -57,7 +59,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32(
   @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32(
     %0,
-    i32 %1)
+    iXLen %1)

   ret %a
 }
@@ -66,10 +68,10 @@ declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32(
   ,
   ,
   ,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
-define @intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32( %0, %1, %2, i32 %3) nounwind {
+define @intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32( %0, %1, %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -80,16 +82,16 @@ entry:
     %0,
     %1,
     %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)

   ret %a
} declare @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32( , - i32); + iXLen); -define @intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32( @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32( , - i32); + iXLen); -define @intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32( @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32( , - i32); + iXLen); -define @intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32( @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64( @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64( %0, - i32 %1) + iXLen %1) ret 
%a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64( @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64( @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64( @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -374,7 +376,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret 
%a
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll
deleted file mode 100644
index 4020c1d5d1a34a9497da3e4fa84d06a47542c924..0000000000000000000000000000000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll
+++ /dev/null
@@ -1,380 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
-  ,
-  i64);
-
-define @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32( %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8
-; CHECK-NEXT: vmv1r.v v8, v9
-; CHECK-NEXT: ret
-entry:
-  %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
-    %0,
-    i64 %1)
-
-  ret %a
-}
-
-declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32(
-  ,
-  ,
-  ,
-  i64,
-  i64);
-
-define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32( %0, %1, %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vfncvt.rod.f.f.w v8, v9, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32(
-    %0,
-    %1,
-    %2,
-    i64 %3, i64 1)
-
-  ret %a
-}
-
-declare @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32(
-  ,
-  i64);
-
-define @intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32( %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8
-; CHECK-NEXT: vmv1r.v v8, v9
-; CHECK-NEXT: ret
-entry:
-  %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32(
-    %0,
-    i64 %1)
-
-  ret %a
-}
-
-declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32(
-  ,
-  ,
-  ,
-  i64,
-  i64);
-
-define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32( %0, %1, %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vfncvt.rod.f.f.w v8, v9, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32(
-    %0,
-    %1,
-    %2,
-    i64 %3, i64 1)
-
-  ret %a
-}
-
-declare @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32(
-  ,
-  i64);
-
-define @intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32( %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vfncvt.rod.f.f.w v10, v8
-; CHECK-NEXT: vmv.v.v v8, v10
-; CHECK-NEXT: ret
-entry:
-  %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32(
-    %0,
-    i64 %1)
-
-  ret %a
-}
-
-declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32(
-  ,
-  ,
-  ,
-  i64,
-  i64);
-
-define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32( %0, %1, %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vfncvt.rod.f.f.w v8, v10, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32(
-    %0,
-    %1,
-    %2,
-    i64 %3, i64 1)
-
-  ret %a
-}
-
-declare @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32(
-  ,
-  i64);
-
-define @intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32(
%0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32( - , - i64); - -define @intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64( - , - i64); - -define @intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64( - , - i64); - -define @intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64: -; CHECK: # 
%bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vfncvt.rod.f.f.w v8, v10, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64(
-    %0,
-    %1,
-    %2,
-    i64 %3, i64 1)
-
-  ret %a
-}
-
-declare @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64(
-  ,
-  i64);
-
-define @intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64( %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vfncvt.rod.f.f.w v12, v8
-; CHECK-NEXT: vmv.v.v v8, v12
-; CHECK-NEXT: ret
-entry:
-  %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64(
-    %0,
-    i64 %1)
-
-  ret %a
-}
-
-declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64(
-  ,
-  ,
-  ,
-  i64,
-  i64);
-
-define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64( %0, %1, %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vfncvt.rod.f.f.w v8, v12, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64(
-    %0,
-    %1,
-    %2,
-    i64 %3, i64 1)
-
-  ret %a
-}
-
-declare @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64(
-  ,
-  i64);
-
-define @intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64( %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vfncvt.rod.f.f.w v16, v8
-; CHECK-NEXT: vmv.v.v v8, v16
-; CHECK-NEXT: ret
-entry:
-  %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64(
-    %0,
-    i64 %1)
-
-  ret %a
-}
-
-declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64(
-  ,
-  ,
-  ,
-  i64,
-  i64);
-
-define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64( %0, %1, %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vfncvt.rod.f.f.w v8, v16, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64(
-    %0,
-    %1,
-    %2,
-    i64 %3, i64 1)
-
-  ret %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll
index b464fdde6db253bc4b7bbc7930efb1b532c2a194..2a7c30939f1086b8a976f15c7da88bcde9caf5ed 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
   ,
-  i32);
+  iXLen);
-define @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32( %0, i32 %1) nounwind {
+define @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32( %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -15,7 +17,7 @@ define
@intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32( @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -24,10 +26,10 @@ declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -38,16 +40,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -57,7 +59,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32( @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -66,10 +68,10 @@ declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -80,16 +82,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32( @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32( @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32( @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32 , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64( @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64( @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64( %0, i32 %1) nounwind { +define 
@intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64( @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64( @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -374,7 +376,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll deleted file mode 100644 index ad695704aae0a4f997b417c239d0388a325425bc..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll +++ /dev/null @@ -1,632 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: 
intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: 
vfncvt.rtz.x.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32( - %0, - 
i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64( - , - i64); - -define 
@intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll index 227210e6f2f015471e96cb702528fcb10d043be6..9a14df186dd512fcdec7bdd65cb2a3d103f29c75 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc 
-mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -15,7 +17,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16( @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -24,10 +26,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -38,16 +40,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -57,7 +59,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16( @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -66,10 +68,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -80,16 +82,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16( @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { ; 
CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16( @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16( @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16( @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32( @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define 
@intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32( @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32( @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -374,16 +376,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -393,7 +395,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32( @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -402,10 +404,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -416,16 +418,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare 
@llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -435,7 +437,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32( @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -444,10 +446,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -458,16 +460,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -477,7 +479,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64( @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -486,10 +488,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -500,16 +502,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -519,7 +521,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64( @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -528,10 +530,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -542,16 +544,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -561,7 +563,7 @@ define 
@intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64( @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -570,10 +572,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -584,16 +586,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -603,7 +605,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64( @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -612,10 +614,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -626,7 +628,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll deleted file mode 100644 index f7f873dd05155574d1036af39250e7793fe075f1..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll +++ /dev/null @@ -1,632 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - 
%a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare 
@llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32( - , - , - , - i64, - i64); - -define 
@intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64( %0, i64 %1) nounwind { -; CHECK-LABEL: 
intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll index 4bfe331db7e006bddf2d12071bf7950baa2e13b2..24d75d6d09b190b7ca9ccd925a4c2ec0e09d685b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs 
-target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -15,7 +17,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -24,10 +26,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -38,16 +40,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -57,7 +59,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -66,10 +68,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -80,16 +82,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32( , , , - i32, - i32); + iXLen, 
+ iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -374,16 +376,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -393,7 +395,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -402,10 +404,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -416,16 +418,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + 
iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -435,7 +437,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -444,10 +446,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32 , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -458,16 +460,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -477,7 +479,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -486,10 +488,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -500,16 +502,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -519,7 +521,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -528,10 +530,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -542,16 +544,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a0, e32, m2, ta, mu @@ -561,7 +563,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -570,10 +572,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -584,16 +586,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -603,7 +605,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -612,10 +614,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -626,7 +628,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll deleted file mode 100644 index d78d695f3df20181b4cb7ea5968ffb43066d70b3..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll +++ /dev/null @@ -1,632 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - 
-declare @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll index 6c97455f1992e15fa86bc7926b79e629279a1fbd..0b8f9c62e50aba16a905148cc6e53f15e47c0d46 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -15,7 +17,7 @@ define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16( @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -24,10 +26,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16( , , , - i32, - 
i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -38,16 +40,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -57,7 +59,7 @@ define @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16( @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -66,10 +68,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -80,16 +82,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16( @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16( @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16( %0, i32 %1) nounwind { +define 
@intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16( @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16( @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32( @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32( @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32( %0, %1, %2, i32 
%3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32( @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -374,16 +376,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -393,7 +395,7 @@ define @intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32( @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -402,10 +404,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -416,16 +418,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -435,7 +437,7 @@ define @intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32( @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -444,10 +446,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -458,16 +460,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64( %0, 
iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -477,7 +479,7 @@ define @intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64( @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -486,10 +488,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -500,16 +502,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -519,7 +521,7 @@ define @intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64( @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -528,10 +530,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -542,16 +544,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -561,7 +563,7 @@ define @intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64( @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -570,10 +572,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -584,16 +586,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -603,7 +605,7 @@ define @intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64( @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -612,10 +614,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64( %0, %1, %2, i32 %3) nounwind { +define 
@intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -626,7 +628,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll deleted file mode 100644 index c7bb913f3797f552778d0c5a6fca8fd977da96b4..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll +++ /dev/null @@ -1,632 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16( - %0, - 
%1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64( %0, i64 %1) nounwind 
{ -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll index 4981f8b16d7415f4b7861ec161202e6caea03fb5..7d802cabd3f779e37aca9acc52a07796840f8202 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -15,7 +17,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16( @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -24,10 +26,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -38,16 +40,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -57,7 +59,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16( @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -66,10 +68,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -80,16 +82,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16( @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16( @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16( @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -225,7 +227,7 @@ define 
@intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16( @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32( @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32( @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32( @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -374,16 +376,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -393,7 +395,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32( @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -402,10 +404,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -416,16 +418,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -435,7 +437,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32( @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -444,10 +446,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -458,16 +460,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -477,7 +479,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64( @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -486,10 +488,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -500,16 +502,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -519,7 
+521,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64( @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -528,10 +530,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -542,16 +544,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -561,7 +563,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64( @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -570,10 +572,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -584,16 +586,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -603,7 +605,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64( @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -612,10 +614,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -626,7 +628,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll deleted file mode 100644 index f8419e81f7d060242837ce0927dceb8ee1af2dc7..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll +++ /dev/null @@ -1,1106 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfnmacc.nxv1f16.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfnmacc.nxv1f16.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv2f16.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv2f16.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv4f16.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv4f16.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv8f16.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv8f16.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv16f16.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) 
nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv16f16.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv1f32.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv2f32.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv4f32.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv8f32.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv1f64.nxv1f64( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv2f64.nxv2f64( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv4f64.nxv4f64( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; 
CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv1f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv1f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv1f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv1f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv2f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv2f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv2f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv2f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv4f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv4f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv4f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv4f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv8f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv8f16.f16( - %0, - half %1, - 
%2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv8f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv8f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv16f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv16f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv16f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv16f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv1f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv1f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv1f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv2f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv2f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv2f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv4f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv4f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv4f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv8f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv8f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv8f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv1f64.f64( - , - double, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv1f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv1f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv2f64.f64( - , - double, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv2f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv2f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfnmacc.mask.nxv2f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv4f64.f64( - , - double, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv4f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv4f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll similarity index 90% rename from llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll index fa1767202c12618d7687a441845ef72549fba258..d46c29f3be78b287ae9236f530251052c845bcda 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfnmacc.nxv1f16.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfnmacc.nxv2f16.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define 
@intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfnmacc.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfnmacc.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfnmacc.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfnmacc.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfnmacc.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfnmacc.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfnmacc.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ 
declare @llvm.riscv.vfnmacc.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfnmacc.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfnmacc.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfnmacc.nxv1f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv1f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +define 
@intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfnmacc.nxv2f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -616,7 +618,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv2f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfnmacc.nxv4f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -662,7 +664,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv4f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -686,7 +688,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfnmacc.nxv8f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -708,7 +710,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv8f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -732,7 +734,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfnmacc.nxv16f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16: ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -754,7 +756,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv16f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -778,7 +780,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vfnmacc.nxv1f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv1f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -833,9 +835,9 @@ declare @llvm.riscv.vfnmacc.nxv2f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -846,7 +848,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -856,9 +858,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv2f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -870,7 +872,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -879,9 +881,9 @@ declare @llvm.riscv.vfnmacc.nxv4f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -892,7 +894,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -902,9 +904,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv4f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -916,7 +918,7 @@ entry: float 
%1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -925,9 +927,9 @@ declare @llvm.riscv.vfnmacc.nxv8f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -938,7 +940,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv8f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -962,7 +964,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -971,9 +973,9 @@ declare @llvm.riscv.vfnmacc.nxv1f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -984,7 +986,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -994,9 +996,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv1f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -1008,7 +1010,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1017,9 +1019,9 @@ declare @llvm.riscv.vfnmacc.nxv2f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1030,7 +1032,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1040,9 +1042,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv2f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1054,7 +1056,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1063,9 +1065,9 @@ declare @llvm.riscv.vfnmacc.nxv4f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1076,7 +1078,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1086,9 +1088,9 @@ declare 
@llvm.riscv.vfnmacc.mask.nxv4f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1100,7 +1102,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll deleted file mode 100644 index ab407427952aca439819897d365d8bbdbd297371..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll +++ /dev/null @@ -1,1106 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfnmadd.nxv1f16.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv1f16.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv2f16.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv2f16.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv4f16.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv4f16.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv8f16.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv8f16.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv16f16.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv16f16.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv1f32.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv2f32.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfnmadd.nxv2f32.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv4f32.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv8f32.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv1f64.nxv1f64( - , - , - , - i64); - -define @intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv2f64.nxv2f64( - , - , - , - i64); - -define @intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind 
{ -; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64( - , - , - , - , - i64); - -define @intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv4f64.nxv4f64( - , - , - , - i64); - -define @intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64( - , - , - , - , - i64); - -define @intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv1f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv1f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv1f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv1f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv2f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv2f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv2f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfnmadd.mask.nxv2f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv4f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv4f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv4f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv4f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv8f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv8f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv8f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv8f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv16f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv16f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv16f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv16f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv1f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv1f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv1f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { -; 
CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv2f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv2f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv2f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv4f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv4f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv4f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv8f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv8f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv8f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv8f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv1f64.f64( - , - double, - , - i64); - -define @intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv1f64.f64( 
- %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv1f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv1f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv2f64.f64( - , - double, - , - i64); - -define @intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv2f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv2f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv2f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv4f64.f64( - , - double, - , - i64); - -define @intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv4f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv4f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv4f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll similarity index 90% rename from llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll index e0d33062322b861ec57d61428de3b0a1de776dd5..44810af5ab31b6c1c22dd197fb2a41b5195b6c31 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfnmadd.nxv1f16.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define 
@intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfnmadd.nxv2f16.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfnmadd.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfnmadd.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfnmadd.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfnmadd.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfnmadd.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfnmadd.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare 
@llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfnmadd.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfnmadd.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfnmadd.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfnmadd.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define 
@intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfnmadd.nxv1f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv1f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfnmadd.nxv2f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -616,7 +618,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv2f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfnmadd.nxv4f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -662,7 +664,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv4f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16: ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -686,7 +688,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfnmadd.nxv8f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -708,7 +710,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv8f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -732,7 +734,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfnmadd.nxv16f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -754,7 +756,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv16f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -778,7 +780,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vfnmadd.nxv1f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv1f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -833,9 +835,9 @@ declare @llvm.riscv.vfnmadd.nxv2f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -846,7 +848,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) 
ret %a } @@ -856,9 +858,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv2f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -870,7 +872,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -879,9 +881,9 @@ declare @llvm.riscv.vfnmadd.nxv4f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -892,7 +894,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -902,9 +904,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv4f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -916,7 +918,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -925,9 +927,9 @@ declare @llvm.riscv.vfnmadd.nxv8f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -938,7 +940,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv8f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -962,7 +964,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -971,9 +973,9 @@ declare @llvm.riscv.vfnmadd.nxv1f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -984,7 +986,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -994,9 +996,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv1f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -1008,7 +1010,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1017,9 +1019,9 @@ declare @llvm.riscv.vfnmadd.nxv2f64.f64( , double, , - i32); + 
iXLen); -define @intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1030,7 +1032,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1040,9 +1042,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv2f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1054,7 +1056,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1063,9 +1065,9 @@ declare @llvm.riscv.vfnmadd.nxv4f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1076,7 +1078,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1086,9 +1088,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv4f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1100,7 +1102,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll deleted file mode 100644 index 58e489618bc4e54ce8fc5434b4d87168d7217147..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll +++ /dev/null @@ -1,1106 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfnmsac.nxv1f16.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv1f16.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv2f16.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv2f16.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv4f16.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv4f16.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv8f16.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv8f16.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv16f16.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv16f16.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv1f32.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv2f32.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv4f32.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv8f32.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; 
CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv1f64.nxv1f64( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv2f64.nxv2f64( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv4f64.nxv4f64( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv1f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv1f16.f16( - %0, 
- half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv1f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv1f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv2f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv2f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv2f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv2f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv4f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv4f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv4f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv4f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv8f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv8f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv8f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv8f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv16f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv16f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv16f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv16f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv1f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv1f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv1f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv2f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv2f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv2f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv4f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv4f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv4f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfnmsac.mask.nxv4f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv8f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv8f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv8f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv8f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv1f64.f64( - , - double, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv1f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv1f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv1f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv2f64.f64( - , - double, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv2f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv2f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv2f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv4f64.f64( - , - double, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv4f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv4f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, 
<vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmsac.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll
similarity index 90%
rename from llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll
index 834938c7d6e74af62c46727899558c1e37445a7d..ff1bcfa86d3a80314cdd73655cf49e2c0187dfe4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x half> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x half> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7
@@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfnmsac.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfnmsac.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfnmsac.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfnmsac.nxv2f32.nxv2f32( , , , - i32); + 
iXLen); -define @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfnmsac.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfnmsac.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfnmsac.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfnmsac.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfnmsac.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfnmsac.nxv1f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv1f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfnmsac.nxv2f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -616,7 +618,7 
@@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv2f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfnmsac.nxv4f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -662,7 +664,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv4f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -686,7 +688,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfnmsac.nxv8f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -708,7 +710,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv8f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -732,7 +734,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfnmsac.nxv16f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -754,7 +756,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv16f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -778,7 +780,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vfnmsac.nxv1f32.f32( , 
float, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv1f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -833,9 +835,9 @@ declare @llvm.riscv.vfnmsac.nxv2f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -846,7 +848,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -856,9 +858,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv2f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -870,7 +872,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -879,9 +881,9 @@ declare @llvm.riscv.vfnmsac.nxv4f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -892,7 +894,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -902,9 +904,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv4f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -916,7 +918,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -925,9 +927,9 @@ declare @llvm.riscv.vfnmsac.nxv8f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -938,7 +940,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv8f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { +define 
@intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -962,7 +964,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -971,9 +973,9 @@ declare @llvm.riscv.vfnmsac.nxv1f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -984,7 +986,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -994,9 +996,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv1f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -1008,7 +1010,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1017,9 +1019,9 @@ declare @llvm.riscv.vfnmsac.nxv2f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1030,7 +1032,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1040,9 +1042,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv2f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1054,7 +1056,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1063,9 +1065,9 @@ declare @llvm.riscv.vfnmsac.nxv4f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1076,7 +1078,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1086,9 +1088,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv4f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1100,7 +1102,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll deleted file mode 100644 index 67dbb5a92dfa3606b9aab9e8346d87ef7e42c270..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll 
+++ /dev/null @@ -1,1106 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s -declare @llvm.riscv.vfnmsub.nxv1f16.nxv1f16( - , - , - , - i32); - -define @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv1f16.nxv1f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16( - , - , - , - , - i32); - -define @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv2f16.nxv2f16( - , - , - , - i32); - -define @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv2f16.nxv2f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16( - , - , - , - , - i32); - -define @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv4f16.nxv4f16( - , - , - , - i32); - -define @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv4f16.nxv4f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16( - , - , - , - , - i32); - -define @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv8f16.nxv8f16( - , - , - , - i32); - -define @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv8f16.nxv8f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare 
@llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16( - , - , - , - , - i32); - -define @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv16f16.nxv16f16( - , - , - , - i32); - -define @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv16f16.nxv16f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16( - , - , - , - , - i32); - -define @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv1f32.nxv1f32( - , - , - , - i32); - -define @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32( - , - , - , - , - i32); - -define @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv2f32.nxv2f32( - , - , - , - i32); - -define @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv2f32.nxv2f32( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32( - , - , - , - , - i32); - -define @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv4f32.nxv4f32( - , - , - , - i32); - -define @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv4f32.nxv4f32( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32( - , - , - , - , - i32); - -define @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv8f32.nxv8f32( - , - , - , - i32); - -define @intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv8f32.nxv8f32( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32( - , - , - , - , - i32); - -define @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv1f64.nxv1f64( - , - , - , - i32); - -define @intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv1f64.nxv1f64( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64( - , - , - , - , - i32); - -define @intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv2f64.nxv2f64( - , - , - , - i32); - -define @intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv2f64.nxv2f64( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64( - , - , - , - , - i32); - -define @intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - 
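; A note on the two forms exercised throughout these files (stated here for
; orientation; this is the RVV spec's definition, not part of the patch):
; vfnmsac.vv overwrites the accumulator, vd = -(vs1 * vs2) + vd, while
; vfnmsub.vv overwrites the multiplicand, vd = -(vd * vs1) + vs2. For a
; single lane with vd = 4.0, vs1 = 2.0 and vs2 = 3.0, vfnmsac yields
; -(2.0 * 3.0) + 4.0 = -2.0, whereas vfnmsub yields -(4.0 * 2.0) + 3.0 = -5.0.
; The masked variants take the same operands plus a mask, and appear in the
; CHECK lines as the v0.t-suffixed instructions.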
-declare @llvm.riscv.vfnmsub.nxv4f64.nxv4f64( - , - , - , - i32); - -define @intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv4f64.nxv4f64( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64( - , - , - , - , - i32); - -define @intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv1f16.f16( - , - half, - , - i32); - -define @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv1f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv1f16.f16( - , - half, - , - , - i32); - -define @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv1f16.f16( - %0, - half %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv2f16.f16( - , - half, - , - i32); - -define @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv2f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv2f16.f16( - , - half, - , - , - i32); - -define @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv2f16.f16( - %0, - half %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv4f16.f16( - , - half, - , - i32); - -define @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv4f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv4f16.f16( - , - half, - , - , - i32); - -define @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv4f16.f16( - %0, - half %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv8f16.f16( - , - half, - , - i32); - -define @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv8f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv8f16.f16( - , - half, - , - , - i32); - -define @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv8f16.f16( - %0, - half %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv16f16.f16( - , - half, - , - i32); - -define @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv16f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv16f16.f16( - , - half, - , - , - i32); - -define @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv16f16.f16( - %0, - half %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv1f32.f32( - , - float, - , - i32); - -define @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv1f32.f32( - %0, - float %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv1f32.f32( - , - float, - , - , - i32); - -define @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32( - %0, - float %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv2f32.f32( - , - float, - , - i32); - -define @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv2f32.f32( - %0, - float %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv2f32.f32( - , - 
float, - , - , - i32); - -define @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv2f32.f32( - %0, - float %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv4f32.f32( - , - float, - , - i32); - -define @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv4f32.f32( - %0, - float %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv4f32.f32( - , - float, - , - , - i32); - -define @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv4f32.f32( - %0, - float %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv8f32.f32( - , - float, - , - i32); - -define @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv8f32.f32( - %0, - float %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv8f32.f32( - , - float, - , - , - i32); - -define @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv8f32.f32( - %0, - float %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv1f64.f64( - , - double, - , - i32); - -define @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv1f64.f64( - %0, - double %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv1f64.f64( - , - double, - , - , - i32); - -define @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv1f64.f64( - %0, - double %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv2f64.f64( - , - double, - , - i32); - -define @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    i32 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    i32 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll
similarity index 90%
rename from llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll
index 07b23dbfb066d87a142f174fb589b72407488132..e6ca32f34752c1d14eb28215c2badeb5ece24f4f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x half> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i64);
+  iXLen);
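; The RUN lines above are the substance of this rename: rather than keeping
; near-identical -rv32.ll/-rv64.ll copies of every test, the merged file
; spells the XLEN-sized VL argument as the placeholder token iXLen, and each
; RUN line rewrites that token with sed before llc ever parses the file. A
; minimal sketch of the same pattern (hypothetical one-intrinsic test, not
; part of this patch):
;
;   ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
;   ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
;   ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
;   ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
;   declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
;     <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
;
; Since the sed output is what llc compiles, one set of autogenerated CHECK
; lines has to hold for both triples, and the hunks below are purely
; mechanical i32/i64 -> iXLen substitutions. The merged RUN lines also drop
; the explicit -mattr=+d, presumably because +v already implies the D
; extension in llc's feature model.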
-define @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfnmsub.nxv4f16.nxv4f16( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfnmsub.nxv8f16.nxv8f16( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfnmsub.nxv16f16.nxv16f16( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfnmsub.nxv1f32.nxv1f32( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfnmsub.nxv2f32.nxv2f32( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfnmsub.nxv4f32.nxv4f32( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfnmsub.nxv8f32.nxv8f32( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 
@@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfnmsub.nxv1f64.nxv1f64( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfnmsub.nxv2f64.nxv2f64( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfnmsub.nxv4f64.nxv4f64( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfnmsub.nxv1f16.f16( , half, , - i64); + iXLen); -define 
@intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv1f16.f16( half, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfnmsub.nxv2f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -616,7 +618,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv2f16.f16( half, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfnmsub.nxv4f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -662,7 +664,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv4f16.f16( half, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -686,7 +688,7 @@ entry: half %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfnmsub.nxv8f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -708,7 +710,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv8f16.f16( half, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -732,7 +734,7 @@ entry: half %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfnmsub.nxv16f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -754,7 +756,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv16f16.f16( half, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -778,7 +780,7 @@ entry: half %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vfnmsub.nxv1f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv1f32.f32( float, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -833,9 +835,9 @@ declare @llvm.riscv.vfnmsub.nxv2f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -846,7 +848,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -856,9 +858,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv2f32.f32( float, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -870,7 +872,7 @@ entry: float %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -879,9 +881,9 @@ declare @llvm.riscv.vfnmsub.nxv4f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a0, e32, m2, tu, mu @@ -892,7 +894,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -902,9 +904,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv4f32.f32( float, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -916,7 +918,7 @@ entry: float %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -925,9 +927,9 @@ declare @llvm.riscv.vfnmsub.nxv8f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -938,7 +940,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv8f32.f32( float, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -962,7 +964,7 @@ entry: float %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -971,9 +973,9 @@ declare @llvm.riscv.vfnmsub.nxv1f64.f64( , double, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -984,7 +986,7 @@ entry: %0, double %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -994,9 +996,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv1f64.f64( double, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -1008,7 +1010,7 @@ entry: double %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1017,9 +1019,9 @@ declare @llvm.riscv.vfnmsub.nxv2f64.f64( , double, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1030,7 +1032,7 @@ entry: %0, double %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -1040,9 +1042,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv2f64.f64( double, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1054,7 +1056,7 @@ entry: double %1, %2, %3, - i64 
%4) + iXLen %4) ret %a } @@ -1063,9 +1065,9 @@ declare @llvm.riscv.vfnmsub.nxv4f64.f64( , double, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1076,7 +1078,7 @@ entry: %0, double %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -1086,9 +1088,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv4f64.f64( double, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1100,7 +1102,7 @@ entry: double %1, %2, %3, - i64 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll deleted file mode 100644 index ccdd6ad371896d62678740f06748adec7c67b0cd..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll +++ /dev/null @@ -1,677 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfrdiv.nxv1f16.f16( - , - half, - i64); - -define @intrinsic_vfrdiv_vf_nxv1f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.nxv1f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrdiv.mask.nxv1f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfrdiv_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.mask.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrdiv.nxv2f16.f16( - , - half, - i64); - -define @intrinsic_vfrdiv_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.nxv2f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrdiv.mask.nxv2f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfrdiv_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.mask.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrdiv.nxv4f16.f16( - , - half, - i64); - -define @intrinsic_vfrdiv_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a0, e16, m1, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.nxv4f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrdiv.mask.nxv4f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfrdiv_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrdiv.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfrdiv_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.nxv8f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrdiv.mask.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfrdiv_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.mask.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrdiv.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfrdiv_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrdiv.mask.nxv16f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfrdiv_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrdiv.nxv32f16.f16( - , - half, - i64); - -define @intrinsic_vfrdiv_vf_nxv32f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.nxv32f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrdiv.mask.nxv32f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfrdiv_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrdiv.nxv1f32.f32( - , - float, - i64); - -define @intrinsic_vfrdiv_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f32_f32: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrdiv.mask.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfrdiv_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrdiv.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfrdiv_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrdiv.mask.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfrdiv_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrdiv.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfrdiv_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.nxv4f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrdiv.mask.nxv4f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfrdiv_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrdiv.nxv8f32.f32( - , - float, - i64); - -define @intrinsic_vfrdiv_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.nxv8f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrdiv.mask.nxv8f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfrdiv_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrdiv.nxv16f32.f32( - , - float, - i64); - -define @intrinsic_vfrdiv_vf_nxv16f32_f32( %0, float %1, i64 %2) 
nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.nxv16f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrdiv.mask.nxv16f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfrdiv_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrdiv.nxv1f64.f64( - , - double, - i64); - -define @intrinsic_vfrdiv_vf_nxv1f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.nxv1f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrdiv.mask.nxv1f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfrdiv_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrdiv.nxv2f64.f64( - , - double, - i64); - -define @intrinsic_vfrdiv_vf_nxv2f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.nxv2f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrdiv.mask.nxv2f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfrdiv_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.mask.nxv2f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrdiv.nxv4f64.f64( - , - double, - i64); - -define @intrinsic_vfrdiv_vf_nxv4f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.nxv4f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrdiv.mask.nxv4f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfrdiv_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.mask.nxv4f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrdiv.nxv8f64.f64( - , - 
double, - i64); - -define @intrinsic_vfrdiv_vf_nxv8f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.nxv8f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrdiv.mask.nxv8f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfrdiv_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrdiv.mask.nxv8f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll similarity index 86% rename from llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll index 1d502c84b1981f2f679580c6d278d11826db1194..58dc39b995055ef3a129279b488aad452c01de09 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfrdiv.nxv1f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -16,7 +18,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv1f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -26,10 +28,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv1f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -41,7 +43,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -49,9 +51,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv2f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -61,7 +63,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv2f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -71,10 +73,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv2f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { 
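; EDITOR'S NOTE: vfrdiv.vf is reverse division: each result element is
; fa0 / vs2[i], i.e. the scalar operand is the dividend, unlike vfdiv.vf.
; The masked declarations in this file end with two iXLen operands: the
; first is the AVL handed to vsetvli, and the second, the constant 1 at
; every call site here, is the policy immediate, which lines up with the
; "ta, mu" in the CHECK lines. A minimal call sketch; operand names are
; illustrative and the vector types are an assumption reconstructed from
; the nxv1f16 suffix (the extracted diff dropped them):
;
;   %r = call <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
;     <vscale x 1 x half> %passthru, <vscale x 1 x half> %src,
;     half %scalar, <vscale x 1 x i1> %mask, iXLen %avl, iXLen 1)
;
; The vfrec7 tests further down follow the same masked/unmasked shape;
; vfrec7.v produces a per-element reciprocal estimate accurate to roughly
; 7 bits.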
; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -86,7 +88,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -94,9 +96,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -106,7 +108,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -116,10 +118,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -229,9 +231,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv32f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +define 
@intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv32f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -251,10 +253,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv32f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -266,7 +268,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -274,9 +276,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -286,7 +288,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -296,10 +298,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -311,7 +313,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -319,9 +321,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -331,7 +333,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -341,10 +343,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -356,7 +358,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -364,9 +366,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -376,7 +378,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -386,10 +388,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv4f32.f32( , float, , - i32, - i32); + iXLen, 
+ iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -401,7 +403,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -409,9 +411,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -421,7 +423,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -431,10 +433,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -446,7 +448,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -454,9 +456,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv16f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -466,7 +468,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv16f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -476,10 +478,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv16f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -491,7 +493,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -499,9 +501,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv1f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -511,7 +513,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv1f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -521,10 +523,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv1f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -536,7 +538,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + 
iXLen %4, iXLen 1) ret %a } @@ -544,9 +546,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv2f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -556,7 +558,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv2f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -566,10 +568,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv2f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -581,7 +583,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -589,9 +591,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv4f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -601,7 +603,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv4f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -611,10 +613,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv4f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -626,7 +628,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -634,9 +636,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv8f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -646,7 +648,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv8f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -656,10 +658,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv8f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -671,7 +673,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll deleted file mode 100644 index 4e0fe7b9fc62f15bb5f3aef24ee7f6a587188005..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll +++ /dev/null @@ -1,617 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; 
RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfrec7.nxv1f16( - , - i64); - -define @intrinsic_vfrec7_v_nxv1f16_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfrec7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv1f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv2f16( - , - i64); - -define @intrinsic_vfrec7_v_nxv2f16_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfrec7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv2f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv4f16( - , - i64); - -define @intrinsic_vfrec7_v_nxv4f16_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfrec7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv4f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv8f16( - , - i64); - -define @intrinsic_vfrec7_v_nxv8f16_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfrec7.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv8f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv16f16( - , - i64); - -define @intrinsic_vfrec7_v_nxv16f16_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: 
intrinsic_vfrec7_v_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfrec7.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv16f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv32f16( - , - i64); - -define @intrinsic_vfrec7_v_nxv32f16_nxv32f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv32f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv32f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfrec7.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv32f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv1f32( - , - i64); - -define @intrinsic_vfrec7_v_nxv1f32_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfrec7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv1f32( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv2f32( - , - i64); - -define @intrinsic_vfrec7_v_nxv2f32_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfrec7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv2f32( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv4f32( - , - i64); - -define @intrinsic_vfrec7_v_nxv4f32_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv4f32( - %0, - i64 %1) - - 
ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfrec7.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv4f32( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv8f32( - , - i64); - -define @intrinsic_vfrec7_v_nxv8f32_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfrec7.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv8f32( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv16f32( - , - i64); - -define @intrinsic_vfrec7_v_nxv16f32_nxv16f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv16f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv16f32( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfrec7.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv16f32( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv1f64( - , - i64); - -define @intrinsic_vfrec7_v_nxv1f64_nxv1f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv1f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv1f64( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfrec7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv1f64( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv2f64( - , - i64); - -define @intrinsic_vfrec7_v_nxv2f64_nxv2f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv2f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv2f64( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfrec7.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv2f64( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv4f64( - , - i64); - -define @intrinsic_vfrec7_v_nxv4f64_nxv4f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv4f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv4f64( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfrec7.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv4f64( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv8f64( - , - i64); - -define @intrinsic_vfrec7_v_nxv8f64_nxv8f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv8f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv8f64( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfrec7.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv8f64( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfrec7.ll index 30897b95deea291f4bc1be18e88fd0cd5344d123..3be9f912d7f2a296380c73d9e876670aa324d454 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfrec7.nxv1f16( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv1f16_nxv1f16( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv1f16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -14,7 +16,7 @@ define @intrinsic_vfrec7_v_nxv1f16_nxv1f16( @llvm.riscv.vfrec7.nxv1f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -23,10 +25,10 @@ declare @llvm.riscv.vfrec7.mask.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a0, e16, mf4, ta, mu @@ -37,16 +39,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv2f16( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv2f16_nxv2f16( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv2f16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -55,7 +57,7 @@ define @intrinsic_vfrec7_v_nxv2f16_nxv2f16( @llvm.riscv.vfrec7.nxv2f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -64,10 +66,10 @@ declare @llvm.riscv.vfrec7.mask.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -78,16 +80,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv4f16( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv4f16_nxv4f16( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv4f16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -96,7 +98,7 @@ define @intrinsic_vfrec7_v_nxv4f16_nxv4f16( @llvm.riscv.vfrec7.nxv4f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -105,10 +107,10 @@ declare @llvm.riscv.vfrec7.mask.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -119,16 +121,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv8f16( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv8f16_nxv8f16( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv8f16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -137,7 +139,7 @@ define @intrinsic_vfrec7_v_nxv8f16_nxv8f16( @llvm.riscv.vfrec7.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -146,10 +148,10 @@ declare @llvm.riscv.vfrec7.mask.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -160,16 +162,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv16f16_nxv16f16( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv16f16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -178,7 +180,7 @@ define @intrinsic_vfrec7_v_nxv16f16_nxv16f16( @llvm.riscv.vfrec7.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -187,10 +189,10 @@ declare @llvm.riscv.vfrec7.mask.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { 
+define @intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -201,16 +203,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv32f16( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv32f16_nxv32f16( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv32f16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -219,7 +221,7 @@ define @intrinsic_vfrec7_v_nxv32f16_nxv32f16( @llvm.riscv.vfrec7.nxv32f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -228,10 +230,10 @@ declare @llvm.riscv.vfrec7.mask.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -242,16 +244,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv1f32_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv1f32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -260,7 +262,7 @@ define @intrinsic_vfrec7_v_nxv1f32_nxv1f32( @llvm.riscv.vfrec7.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -269,10 +271,10 @@ declare @llvm.riscv.vfrec7.mask.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -283,16 +285,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv2f32_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv2f32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -301,7 +303,7 @@ define @intrinsic_vfrec7_v_nxv2f32_nxv2f32( @llvm.riscv.vfrec7.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vfrec7.mask.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -324,16 +326,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv4f32_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv4f32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -342,7 +344,7 @@ define @intrinsic_vfrec7_v_nxv4f32_nxv4f32( @llvm.riscv.vfrec7.nxv4f32( %0, - i32 %1) + 
iXLen %1) ret %a } @@ -351,10 +353,10 @@ declare @llvm.riscv.vfrec7.mask.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -365,16 +367,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv8f32_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv8f32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -383,7 +385,7 @@ define @intrinsic_vfrec7_v_nxv8f32_nxv8f32( @llvm.riscv.vfrec7.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -392,10 +394,10 @@ declare @llvm.riscv.vfrec7.mask.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -406,16 +408,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv16f32( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv16f32_nxv16f32( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv16f32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -424,7 +426,7 @@ define @intrinsic_vfrec7_v_nxv16f32_nxv16f32( @llvm.riscv.vfrec7.nxv16f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -433,10 +435,10 @@ declare @llvm.riscv.vfrec7.mask.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -447,16 +449,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv1f64( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv1f64_nxv1f64( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv1f64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -465,7 +467,7 @@ define @intrinsic_vfrec7_v_nxv1f64_nxv1f64( @llvm.riscv.vfrec7.nxv1f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -474,10 +476,10 @@ declare @llvm.riscv.vfrec7.mask.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -488,16 +490,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv2f64( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv2f64_nxv2f64( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv2f64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vfrec7_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -506,7 +508,7 @@ define @intrinsic_vfrec7_v_nxv2f64_nxv2f64( @llvm.riscv.vfrec7.nxv2f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -515,10 +517,10 @@ declare @llvm.riscv.vfrec7.mask.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -529,16 +531,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv4f64( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv4f64_nxv4f64( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv4f64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -547,7 +549,7 @@ define @intrinsic_vfrec7_v_nxv4f64_nxv4f64( @llvm.riscv.vfrec7.nxv4f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -556,10 +558,10 @@ declare @llvm.riscv.vfrec7.mask.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -570,16 +572,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv8f64( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv8f64_nxv8f64( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv8f64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -588,7 +590,7 @@ define @intrinsic_vfrec7_v_nxv8f64_nxv8f64( @llvm.riscv.vfrec7.nxv8f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -597,10 +599,10 @@ declare @llvm.riscv.vfrec7.mask.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -611,7 +613,7 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll deleted file mode 100644 index bb173e91ecf3185a0edf3e5400a80f2fde9a1eab..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll +++ /dev/null @@ -1,692 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfredmax.nxv4f16.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv4f16.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare 
@llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv4f16.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv4f16.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv4f16.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv4f16.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv4f16.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv4f16.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv4f16.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv4f16.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv4f16.nxv32f16( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv4f16.nxv32f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv2f32.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv2f32.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv2f32.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = 
call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv2f32.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv2f32.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv2f32.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv2f32.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv2f32.nxv16f32( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv2f32.nxv16f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv1f64.nxv1f64( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv1f64.nxv1f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64( - , - , - , - , - i64); - -define 
@intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv1f64.nxv2f64( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv1f64.nxv2f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv1f64.nxv4f64( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv1f64.nxv4f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv1f64.nxv8f64( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv1f64.nxv8f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll similarity index 88% rename from llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfredmax.ll index 
25ed3f1ab36763d90edcab7759b462ad4f2b9fd8..0a2d72bf382aaa1d5ec89efb4eae37fc5c2da689 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfredmax.nxv4f16.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfredmax.nxv4f16.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfredmax.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) 
ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfredmax.nxv4f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfredmax.nxv4f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfredmax.nxv4f16.nxv32f16( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfredmax.nxv2f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1( , , , - i32); + iXLen); -define 
@intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfredmax.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfredmax.nxv2f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfredmax.nxv2f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfredmax.nxv2f32.nxv16f32( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define 
@intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfredmax.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.nxv1i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfredmax.nxv1f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -570,7 +572,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -594,7 +596,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfredmax.nxv1f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -616,7 +618,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -640,7 +642,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfredmax.nxv1f64.nxv8f64( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu @@ -662,7 +664,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.nxv8i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu @@ -686,7 +688,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll deleted file mode 100644 index d04ef7a6707dea0c84cdcb15c6c8f6386f1bf848..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll +++ /dev/null @@ -1,692 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfredmin.nxv4f16.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv4f16.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv4f16.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv4f16.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - 
ret %a -} - -declare @llvm.riscv.vfredmin.nxv4f16.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv4f16.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv4f16.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv4f16.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv4f16.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv4f16.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv4f16.nxv32f16( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv4f16.nxv32f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv2f32.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv2f32.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv2f32.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv2f32.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv2f32.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv2f32.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfredmin.nxv2f32.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv2f32.nxv16f32( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv2f32.nxv16f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv1f64.nxv1f64( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv1f64.nxv1f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv1f64.nxv2f64( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv1f64.nxv2f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv1f64.nxv4f64( - , - , - , - i64); - -define 
@intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64(
-  <vscale x 1 x double>,
-  <vscale x 4 x double>,
-  <vscale x 1 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64(
-  <vscale x 1 x double>,
-  <vscale x 8 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64(
-  <vscale x 1 x double>,
-  <vscale x 8 x double>,
-  <vscale x 1 x double>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll
similarity index 88%
rename from llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfredmin.ll
index 9561be7b6fc0990496ba1556d7d46ce4713b13e2..4d0301d3485ce4368cce23c89b0d376295e6a3e7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1(
   <vscale x 1 x half>,
   <vscale x 4 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ;
CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfredmin.nxv4f16.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfredmin.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfredmin.nxv4f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfredmin.nxv4f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare 
@llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfredmin.nxv4f16.nxv32f16( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfredmin.nxv2f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfredmin.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfredmin.nxv2f32.nxv4f32( , , , - i32); + iXLen); -define 
@intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfredmin.nxv2f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfredmin.nxv2f32.nxv16f32( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfredmin.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.nxv1i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define 
@intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfredmin.nxv1f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -570,7 +572,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -594,7 +596,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfredmin.nxv1f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -616,7 +618,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -640,7 +642,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfredmin.nxv1f64.nxv8f64( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu @@ -662,7 +664,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.nxv8i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu @@ -686,7 +688,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll deleted file mode 100644 index 8c42e43d9094d654cc6993bc3c34b488b6709f3b..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll +++ /dev/null @@ -1,692 +0,0 @@ -; 
NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfredosum.nxv4f16.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv4f16.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv4f16.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv4f16.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv4f16.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv4f16.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv4f16.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv4f16.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare 
@llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv4f16.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv4f16.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv4f16.nxv32f16( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv4f16.nxv32f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv2f32.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv2f32.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { 
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv2f32.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv2f32.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv2f32.nxv16f32( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v16, 
v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv1f64.nxv1f64( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv1f64.nxv2f64( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv1f64.nxv4f64( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv1f64.nxv8f64( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64( - , - , - , 
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll
similarity index 88%
rename from llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfredosum.ll
index 1f1e68e0dbc9b866237b3b814fba8d2b68ac2204..b814315d90cd16ab81b84589ce5fe29be0b1cb5f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1(
   <vscale x 1 x half>,
   <vscale x 4 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16(
   <vscale x 4 x half>,
   <vscale x 2 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1(
   <vscale x 2 x half>,
   <vscale x 4 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL:
intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfredosum.nxv4f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfredosum.nxv4f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfredosum.nxv4f16.nxv32f16( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: 
# %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfredosum.nxv2f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfredosum.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfredosum.nxv2f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfredosum.nxv2f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - 
i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfredosum.nxv2f32.nxv16f32( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1 , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfredosum.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.nxv1i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfredosum.nxv1f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -570,7 +572,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -594,7 +596,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare 
@llvm.riscv.vfredosum.nxv1f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -616,7 +618,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -640,7 +642,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfredosum.nxv1f64.nxv8f64( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu @@ -662,7 +664,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.nxv8i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu @@ -686,7 +688,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredusum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredusum-rv64.ll deleted file mode 100644 index 9264397afa0e7b03a7fc24a22b08e126ff5b3eb0..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfredusum-rv64.ll +++ /dev/null @@ -1,692 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfredusum.nxv4f16.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv4f16.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv4f16.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { 
-; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv4f16.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv4f16.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv4f16.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv4f16.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv4f16.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv4f16.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv4f16.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfredusum.vs v8, 
v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv4f16.nxv32f16( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv4f16.nxv32f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv2f32.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv2f32.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv2f32.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv2f32.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv2f32.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare 
@llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv2f32.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv2f32.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv2f32.nxv16f32( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv2f32.nxv16f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv1f64.nxv1f64( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv1f64.nxv1f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv1f64.nxv2f64( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; 
CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv2f64(
-    <vscale x 1 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64(
-  <vscale x 1 x double>,
-  <vscale x 2 x double>,
-  <vscale x 1 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64(
-    <vscale x 1 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv4f64(
-  <vscale x 1 x double>,
-  <vscale x 4 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv4f64(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64(
-  <vscale x 1 x double>,
-  <vscale x 4 x double>,
-  <vscale x 1 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv8f64(
-  <vscale x 1 x double>,
-  <vscale x 8 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv8f64(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64(
-  <vscale x 1 x double>,
-  <vscale x 8 x double>,
-  <vscale x 1 x double>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vfredusum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredusum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll
similarity index 88%
rename from llvm/test/CodeGen/RISCV/rvv/vfredusum-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfredusum.ll
index 7b3691c74887c43c849aa0c27f3fb3af97e7835a..e6ff649ab398c0703e8b104c112937e1424e9bef 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredusum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+;
RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfredusum.nxv4f16.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.nxv1i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfredusum.nxv4f16.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfredusum.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfredusum.nxv4f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.nxv8i1( , , 
, - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfredusum.nxv4f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.nxv16i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfredusum.nxv4f16.nxv32f16( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.nxv32i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfredusum.nxv2f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.nxv1i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfredusum.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define 
@intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfredusum.nxv2f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfredusum.nxv2f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.nxv8i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfredusum.nxv2f32.nxv16f32( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.nxv16i1 , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { 
+define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfredusum.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.nxv1i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfredusum.nxv1f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -570,7 +572,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -594,7 +596,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfredusum.nxv1f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -616,7 +618,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -640,7 +642,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfredusum.nxv1f64.nxv8f64( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu @@ -662,7 +664,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.nxv8i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu @@ -686,7 +688,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll deleted file mode 100644 index cf9d7a4c0af5a5f86b19d294cfdb95c5007fcb64..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll +++ /dev/null @@ -1,617 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfrsqrt7.nxv1f16( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv1f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv2f16( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv2f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv4f16( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e16, m1, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv4f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv8f16( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv8f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv16f16( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv16f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv32f16( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv32f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv32f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv32f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv1f32( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv1f32( - %1, - %2, - %0, - 
i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv2f32( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv2f32( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv4f32( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv4f32( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv8f32( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv8f32( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv16f32( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv16f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv16f32( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv16f32( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv1f64( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64( %0, i64 %1) nounwind { -; 
CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv1f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv1f64( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv1f64( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv2f64( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv2f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv2f64( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv2f64( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv4f64( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv4f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv4f64( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv4f64( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv8f64( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv8f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv8f64( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv8f64( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll index 
d0198a85b0c52a03dff7640db08c465dce8b2192..a521b6c2f2b57554fbe1144db0e3e2d93b9a62f3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfrsqrt7.nxv1f16( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -14,7 +16,7 @@ define @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16( @llvm.riscv.vfrsqrt7.nxv1f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -23,10 +25,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -37,16 +39,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv2f16( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -55,7 +57,7 @@ define @intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16( @llvm.riscv.vfrsqrt7.nxv2f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -64,10 +66,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -78,16 +80,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv4f16( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -96,7 +98,7 @@ define @intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16( @llvm.riscv.vfrsqrt7.nxv4f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -105,10 +107,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -119,16 +121,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv8f16( , - i32); + 
iXLen); -define @intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -137,7 +139,7 @@ define @intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16( @llvm.riscv.vfrsqrt7.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -146,10 +148,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -160,16 +162,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -178,7 +180,7 @@ define @intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16( @llvm.riscv.vfrsqrt7.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -187,10 +189,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -201,16 +203,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv32f16( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -219,7 +221,7 @@ define @intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16( @llvm.riscv.vfrsqrt7.nxv32f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -228,10 +230,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -242,16 +244,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -260,7 +262,7 @@ define @intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32( @llvm.riscv.vfrsqrt7.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -269,10 +271,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32( %0, %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -283,16 +285,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -301,7 +303,7 @@ define @intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32( @llvm.riscv.vfrsqrt7.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -324,16 +326,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -342,7 +344,7 @@ define @intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32( @llvm.riscv.vfrsqrt7.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -351,10 +353,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -365,16 +367,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -383,7 +385,7 @@ define @intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32( @llvm.riscv.vfrsqrt7.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -392,10 +394,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -406,16 +408,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv16f32( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -424,7 +426,7 @@ define @intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32( @llvm.riscv.vfrsqrt7.nxv16f32( %0, - i32 %1) + 
iXLen %1) ret %a } @@ -433,10 +435,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -447,16 +449,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv1f64( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -465,7 +467,7 @@ define @intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64( @llvm.riscv.vfrsqrt7.nxv1f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -474,10 +476,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -488,16 +490,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv2f64( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -506,7 +508,7 @@ define @intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64( @llvm.riscv.vfrsqrt7.nxv2f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -515,10 +517,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -529,16 +531,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv4f64( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -547,7 +549,7 @@ define @intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64( @llvm.riscv.vfrsqrt7.nxv4f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -556,10 +558,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -570,16 +572,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv8f64( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64( 
%0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -588,7 +590,7 @@ define @intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64( @llvm.riscv.vfrsqrt7.nxv8f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -597,10 +599,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -611,7 +613,7 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll deleted file mode 100644 index 0477554c914190c247269ade68b3d42f53bf604f..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll +++ /dev/null @@ -1,678 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -mattr=+zfh \ -; RUN: -mattr=+d -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfrsub.nxv1f16.f16( - , - half, - i64); - -define @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv1f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv1f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv2f16.f16( - , - half, - i64); - -define @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv2f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv2f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv4f16.f16( - , - half, - i64); - -define @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv4f16.f16( - %0, - half 
%1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv4f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv8f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv16f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv32f16.f16( - , - half, - i64); - -define @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv32f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv32f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv1f32.f32( - , - float, - i64); - -define @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv4f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv4f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv8f32.f32( - , - float, - i64); - -define @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv8f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv8f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare 
@llvm.riscv.vfrsub.nxv16f32.f32( - , - float, - i64); - -define @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv16f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv16f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv1f64.f64( - , - double, - i64); - -define @intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv1f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv1f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv2f64.f64( - , - double, - i64); - -define @intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv2f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv2f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv2f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv4f64.f64( - , - double, - i64); - -define @intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv4f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv4f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    double %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfrsub.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfrsub.ll
index eab5b2a414e189f7b2a6531291ba5e32383f75cf..3fb281562088c8e5693c5321e661e502fe7136a3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfrsub.mask.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -71,10
+73,10 @@ declare @llvm.riscv.vfrsub.mask.nxv2f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -86,7 +88,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -94,9 +96,9 @@ entry: declare @llvm.riscv.vfrsub.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -106,7 +108,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -116,10 +118,10 @@ declare @llvm.riscv.vfrsub.mask.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfrsub.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfrsub.mask.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfrsub.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfrsub.mask.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 
@@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -229,9 +231,9 @@ entry: declare @llvm.riscv.vfrsub.nxv32f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv32f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -251,10 +253,10 @@ declare @llvm.riscv.vfrsub.mask.nxv32f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -266,7 +268,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -274,9 +276,9 @@ entry: declare @llvm.riscv.vfrsub.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -286,7 +288,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -296,10 +298,10 @@ declare @llvm.riscv.vfrsub.mask.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -311,7 +313,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -319,9 +321,9 @@ entry: declare @llvm.riscv.vfrsub.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -331,7 +333,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -341,10 +343,10 @@ declare @llvm.riscv.vfrsub.mask.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -356,7 +358,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -364,9 +366,9 @@ entry: declare @llvm.riscv.vfrsub.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -376,7 +378,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -386,10 +388,10 @@ declare @llvm.riscv.vfrsub.mask.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -401,7 +403,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -409,9 +411,9 @@ entry: declare @llvm.riscv.vfrsub.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -421,7 +423,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -431,10 +433,10 @@ declare @llvm.riscv.vfrsub.mask.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -446,7 +448,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -454,9 +456,9 @@ entry: declare @llvm.riscv.vfrsub.nxv16f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -466,7 +468,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv16f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -476,10 +478,10 @@ declare @llvm.riscv.vfrsub.mask.nxv16f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -491,7 +493,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -499,9 +501,9 @@ entry: declare @llvm.riscv.vfrsub.nxv1f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -511,7 +513,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv1f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -521,10 +523,10 @@ declare @llvm.riscv.vfrsub.mask.nxv1f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define 
@intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -536,7 +538,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -544,9 +546,9 @@ entry: declare @llvm.riscv.vfrsub.nxv2f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -556,7 +558,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv2f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -566,10 +568,10 @@ declare @llvm.riscv.vfrsub.mask.nxv2f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -581,7 +583,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -589,9 +591,9 @@ entry: declare @llvm.riscv.vfrsub.nxv4f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -601,7 +603,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv4f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -611,10 +613,10 @@ declare @llvm.riscv.vfrsub.mask.nxv4f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -626,7 +628,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -634,9 +636,9 @@ entry: declare @llvm.riscv.vfrsub.nxv8f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -646,7 +648,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv8f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -656,10 +658,10 @@ declare @llvm.riscv.vfrsub.mask.nxv8f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -671,7 +673,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll 
b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll deleted file mode 100644 index d71fb8fb253520befccdf0a73f4fbbffa2158cbe..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll +++ /dev/null @@ -1,1355 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfsgnj.nxv1f16( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv1f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv1f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv2f16( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv2f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv2f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv4f16( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv4f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv4f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv8f16( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv8f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv8f16( - , - , - , - , - i64, - i64); - -define 
@intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv16f16( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv16f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv16f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv32f16( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv32f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv32f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv1f32( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv1f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv1f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv2f32( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv2f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv2f32( - , - , - , - , - i64, - i64); - -define 
@intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv4f32( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv4f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv4f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv8f32( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv8f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv8f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv16f32( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv16f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv16f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv1f64( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv1f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv1f64( - , - , - , - , - i64, - i64); - -define 
@intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv2f64( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv2f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv2f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv4f64( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv4f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv4f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv8f64( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv8f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv8f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv8f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv1f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnj_vf_nxv1f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv1f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv1f16.f16( - , - , - half, - , - i64, - i64); - -define 
@intrinsic_vfsgnj_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv2f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnj_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv2f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv2f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv4f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnj_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv4f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv4f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnj_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv8f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnj_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv16f16.f16( - , - 
, - half, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv32f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnj_vf_nxv32f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv32f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv32f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv1f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnj_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnj_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnj_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv4f32.f32( - %0, - float %1, - i64 %2) - - ret 
%a -} - -declare @llvm.riscv.vfsgnj.mask.nxv4f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv8f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnj_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv8f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv8f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv16f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnj_vf_nxv16f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv16f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv16f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv1f64.f64( - , - double, - i64); - -define @intrinsic_vfsgnj_vf_nxv1f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv1f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv1f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv2f64.f64( - , - double, - i64); - -define @intrinsic_vfsgnj_vf_nxv2f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = 
call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfsgnj_mask_vf_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    double %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfsgnj_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfsgnj_mask_vf_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    double %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfsgnj_vf_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfsgnj_mask_vf_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll
index c0e999a2433d057b9c11ce279d7e818f24ce46da..65a5592775cb5677450c490e4369c5175e0ede63 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL:
intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -16,7 +18,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv1f16.nxv1f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -26,10 +28,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -41,7 +43,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -49,9 +51,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv2f16.nxv2f16( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -61,7 +63,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv2f16.nxv2f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -71,10 +73,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv2f16.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -86,7 +88,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -94,9 +96,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv4f16.nxv4f16( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -106,7 +108,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv4f16.nxv4f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -116,10 +118,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv4f16.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv8f16.nxv8f16( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv8f16.nxv8f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv8f16.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define 
@intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv16f16.nxv16f16( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv16f16.nxv16f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv16f16.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -229,9 +231,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv32f16.nxv32f16( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv32f16.nxv32f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -251,10 +253,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv32f16.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -267,7 +269,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -275,9 +277,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv1f32.nxv1f32( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -287,7 +289,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -297,10 +299,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -312,7 +314,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -320,9 +322,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv2f32.nxv2f32( , , - i32); + iXLen); -define 
@intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -332,7 +334,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv2f32.nxv2f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -342,10 +344,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -357,7 +359,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -365,9 +367,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv4f32.nxv4f32( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -377,7 +379,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv4f32.nxv4f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -387,10 +389,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -402,7 +404,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -410,9 +412,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv8f32.nxv8f32( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -422,7 +424,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv8f32.nxv8f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -432,10 +434,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -447,7 +449,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -455,9 +457,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv16f32.nxv16f32( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -467,7 +469,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv16f32.nxv16f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -477,10 +479,10 @@ declare 
@llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -493,7 +495,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -501,9 +503,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv1f64.nxv1f64( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -513,7 +515,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv1f64.nxv1f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -523,10 +525,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -538,7 +540,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -546,9 +548,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv2f64.nxv2f64( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -558,7 +560,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv2f64.nxv2f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -568,10 +570,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -583,7 +585,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -591,9 +593,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv4f64.nxv4f64( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -603,7 +605,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv4f64.nxv4f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -613,10 +615,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -628,7 +630,7 @@ entry: %1, %2, %3, - i32 %4, i32 
1) + iXLen %4, iXLen 1) ret %a } @@ -636,9 +638,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv8f64.nxv8f64( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -648,7 +650,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv8f64.nxv8f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -658,10 +660,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -674,7 +676,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -682,9 +684,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv1f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -694,7 +696,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv1f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -704,10 +706,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv1f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -719,7 +721,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -727,9 +729,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv2f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -739,7 +741,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv2f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -749,10 +751,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv2f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -764,7 +766,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -772,9 +774,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -784,7 +786,7 @@ entry: %a = call 
@llvm.riscv.vfsgnj.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -794,10 +796,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -809,7 +811,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -817,9 +819,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -829,7 +831,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -839,10 +841,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -854,7 +856,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -862,9 +864,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -874,7 +876,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -884,10 +886,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -899,7 +901,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -907,9 +909,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv32f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -919,7 +921,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv32f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -929,10 +931,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv32f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnj_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -944,7 +946,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -952,9 +954,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -964,7 +966,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -974,10 +976,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -989,7 +991,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -997,9 +999,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1009,7 +1011,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1019,10 +1021,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1034,7 +1036,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1042,9 +1044,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1054,7 +1056,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1064,10 +1066,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1079,7 +1081,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1087,9 +1089,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define 
@intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1099,7 +1101,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1109,10 +1111,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1124,7 +1126,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1132,9 +1134,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv16f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1144,7 +1146,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv16f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1154,10 +1156,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv16f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1169,7 +1171,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1177,9 +1179,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv1f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1189,7 +1191,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv1f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1199,10 +1201,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv1f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1214,7 +1216,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv2f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv2f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1244,10 +1246,10 @@ declare 
<vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfsgnj_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfsgnj_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -1259,7 +1261,7 @@ entry:
     <vscale x 2 x double> %1,
     double %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -1267,9 +1269,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1279,7 +1281,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64(
     <vscale x 4 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -1289,10 +1291,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfsgnj_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfsgnj_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1304,7 +1306,7 @@ entry:
     <vscale x 4 x double> %1,
     double %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -1312,9 +1314,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1324,7 +1326,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64(
     <vscale x 8 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -1334,10 +1336,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfsgnj_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfsgnj_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1349,7 +1351,7 @@ entry:
     <vscale x 8 x double> %1,
     double %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll
deleted file mode 100644
index f751fd74063894baea62fe0792ff5ddfb84de68b..0000000000000000000000000000000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll
+++ /dev/null
@@ -1,1355 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half>
@llvm.riscv.vfsgnjn.mask.nxv1f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv2f16( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv2f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv2f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv4f16( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv4f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv4f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv8f16( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv8f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv8f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv16f16( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv16f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv16f16( 
- , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv32f16( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv32f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv32f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv1f32( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv1f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv1f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv2f32( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv2f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv2f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv4f32( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv4f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare 
@llvm.riscv.vfsgnjn.mask.nxv4f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv8f32( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv8f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv8f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv16f32( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv16f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv16f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv1f64( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv1f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv1f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv2f64( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv2f64( - %0, - %1, - i64 %2) - - ret %a -} - 
-declare @llvm.riscv.vfsgnjn.mask.nxv2f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv4f64( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv4f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv4f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv8f64( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv8f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv8f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv8f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv1f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjn_vf_nxv1f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv1f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv1f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv2f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjn_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv2f16.f16( - 
%0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv2f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv4f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjn_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv4f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv4f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjn_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv8f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjn_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv16f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv32f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjn_vf_nxv32f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vf 
v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv32f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv32f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv1f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnjn_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnjn_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnjn_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv4f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv4f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv8f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnjn_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f32_f32: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv8f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv8f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv16f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnjn_vf_nxv16f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv16f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv16f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv1f64.f64( - , - double, - i64); - -define @intrinsic_vfsgnjn_vf_nxv1f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv1f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv1f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv2f64.f64( - , - double, - i64); - -define @intrinsic_vfsgnjn_vf_nxv2f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv2f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv2f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv2f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv4f64.f64( - , - double, - i64); - -define 
<vscale x 4 x double> @intrinsic_vfsgnjn_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfsgnjn_mask_vf_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    double %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfsgnjn_vf_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfsgnjn_mask_vf_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll
index 0287f9ea2cbf30e2629b877c61f5c212938397dc..f16c8a6db1fb9b97bb64a6c5007c82433ef16b3e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:
vsetvli zero, a0, e16, mf4, ta, mu @@ -41,7 +43,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -49,9 +51,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -61,7 +63,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -71,10 +73,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -86,7 +88,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -94,9 +96,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -106,7 +108,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -116,10 +118,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -229,9 +231,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -251,10 +253,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -267,7 +269,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -275,9 +277,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -287,7 +289,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -297,10 +299,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -312,7 +314,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -320,9 +322,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -332,7 +334,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -342,10 +344,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32_nxv2f32( 
%0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -357,7 +359,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -365,9 +367,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -377,7 +379,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -387,10 +389,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -402,7 +404,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -410,9 +412,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -422,7 +424,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -432,10 +434,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -447,7 +449,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -455,9 +457,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -467,7 +469,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -477,10 +479,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -493,7 +495,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -501,9 +503,9 @@ entry: declare 
@llvm.riscv.vfsgnjn.nxv1f64.nxv1f64( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -513,7 +515,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -523,10 +525,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -538,7 +540,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -546,9 +548,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -558,7 +560,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -568,10 +570,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -583,7 +585,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -591,9 +593,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -603,7 +605,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -613,10 +615,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -628,7 +630,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -636,9 +638,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -648,7 +650,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64( 
%0, %1, - i32 %2) + iXLen %2) ret %a } @@ -658,10 +660,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -674,7 +676,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -682,9 +684,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv1f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -694,7 +696,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv1f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -704,10 +706,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv1f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -719,7 +721,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -727,9 +729,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv2f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -739,7 +741,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv2f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -749,10 +751,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv2f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -764,7 +766,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -772,9 +774,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -784,7 +786,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -794,10 +796,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -809,7 +811,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -817,9 +819,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -829,7 +831,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -839,10 +841,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -854,7 +856,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -862,9 +864,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -874,7 +876,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -884,10 +886,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -899,7 +901,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -907,9 +909,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv32f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -919,7 +921,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv32f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -929,10 +931,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv32f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -944,7 +946,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -952,9 +954,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -964,7 +966,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -974,10 +976,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -989,7 +991,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -997,9 +999,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1009,7 +1011,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1019,10 +1021,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1034,7 +1036,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1042,9 +1044,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1054,7 +1056,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1064,10 +1066,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1079,7 +1081,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1087,9 +1089,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1099,7 +1101,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1109,10 +1111,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define 
@intrinsic_vfsgnjn_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1124,7 +1126,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1132,9 +1134,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv16f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1144,7 +1146,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv16f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1154,10 +1156,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv16f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1169,7 +1171,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1177,9 +1179,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv1f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1189,7 +1191,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv1f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1199,10 +1201,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv1f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1214,7 +1216,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv2f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv2f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1244,10 +1246,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv2f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1259,7 +1261,7 @@ 
entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1267,9 +1269,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv4f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1279,7 +1281,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv4f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1289,10 +1291,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv4f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1304,7 +1306,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1312,9 +1314,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv8f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1324,7 +1326,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv8f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1334,10 +1336,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv8f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1349,7 +1351,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll deleted file mode 100644 index 4ae69f0d4f613f7db40b7da2158e5e3a8c543a2e..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll +++ /dev/null @@ -1,1355 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfsgnjx.nxv1f16( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv1f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv1f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv1f16( - %0, - %1, - %2, - %3, - i64 
%4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv2f16( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv2f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv2f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv4f16( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv4f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv4f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv8f16( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv8f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv8f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv16f16( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv16f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv16f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - 
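; ---------------------------------------------------------------------------
; Aside (editor's sketch, not part of the patch itself): every hunk in this
; patch follows the same recipe. The per-target vfsgnj*-rv32.ll / -rv64.ll
; copies differed only in whether the VL and policy operands were spelled i32
; or i64 (XLEN), so each pair is folded into one file that writes those
; operands as the placeholder "iXLen". The placeholder is not a real IR type;
; each RUN line rewrites it with sed before the input reaches llc, so one
; source file now drives both targets. A minimal unified test in the style of
; this patch would look as follows -- the function name @sketch_vfsgnjx_vv is
; made up for illustration, and in practice the CHECK lines would be
; regenerated with utils/update_llc_test_checks.py:
;
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);  ; becomes i32 or i64 once the sed substitution has run

define <vscale x 1 x half> @sketch_vfsgnjx_vv(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: sketch_vfsgnjx_vv:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vfsgnjx.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
  ; vfsgnjx.vv keeps the magnitude of the first source and XORs the two sign
  ; bits; vfsgnjn (earlier in this patch) injects the negated sign instead.
  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen %2)
  ret <vscale x 1 x half> %a
}

; In the masked variants throughout this patch, the trailing "iXLen %4,
; iXLen 1" pair is the vector length followed by the policy operand; as I
; read it, the constant 1 requests a tail-agnostic result, which is why the
; autogenerated vsetvli in the CHECK lines carries "ta".
; ---------------------------------------------------------------------------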
-declare @llvm.riscv.vfsgnjx.nxv32f16( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv32f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv32f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv1f32( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv1f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv1f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv2f32( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv2f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv2f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv4f32( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv4f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv4f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret 
%a -} - -declare @llvm.riscv.vfsgnjx.nxv8f32( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv8f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv8f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv16f32( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv16f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv16f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv1f64( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv1f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv1f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv2f64( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv2f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv2f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - 
- ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv4f64( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv4f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv4f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv8f64( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv8f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv8f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv8f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv1f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjx_vf_nxv1f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv1f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv1f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv2f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjx_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv2f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv2f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfsgnjx.mask.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv4f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjx_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv4f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv4f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjx_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv8f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjx_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv16f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv32f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjx_vf_nxv32f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv32f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv32f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, 
mu -; CHECK-NEXT: vfsgnjx.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv1f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnjx_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnjx_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnjx_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv4f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv4f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv8f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnjx_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv8f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv8f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vfsgnjx_mask_vf_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv16f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnjx_vf_nxv16f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv16f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv16f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv1f64.f64( - , - double, - i64); - -define @intrinsic_vfsgnjx_vf_nxv1f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv1f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv1f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv2f64.f64( - , - double, - i64); - -define @intrinsic_vfsgnjx_vf_nxv2f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv2f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv2f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv2f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv4f64.f64( - , - double, - i64); - -define @intrinsic_vfsgnjx_vf_nxv4f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv4f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv4f64.f64( - , - , - double, - , - 
i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv4f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv8f64.f64( - , - double, - i64); - -define @intrinsic_vfsgnjx_vf_nxv8f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv8f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv8f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv8f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll similarity index 86% rename from llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll index fba4eceb23fb6912e450f91f09675fb5489f4f5d..edfd578ce8aa016e31fceb9a6953d065b450d69c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16( , , - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -16,7 +18,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -26,10 +28,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -41,7 +43,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -49,9 +51,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16( , , - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -61,7 +63,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -71,10 +73,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -86,7 +88,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -94,9 +96,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16( , , - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -106,7 +108,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -116,10 +118,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16( , , - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16( , , - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, 
i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -229,9 +231,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16( , , - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -251,10 +253,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -267,7 +269,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -275,9 +277,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32( , , - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -287,7 +289,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -297,10 +299,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -312,7 +314,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -320,9 +322,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32( , , - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -332,7 +334,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -342,10 +344,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -357,7 +359,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -365,9 +367,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32( , 
, - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -377,7 +379,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -387,10 +389,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -402,7 +404,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -410,9 +412,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32( , , - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -422,7 +424,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -432,10 +434,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -447,7 +449,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -455,9 +457,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32( , , - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -467,7 +469,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -477,10 +479,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -493,7 +495,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -501,9 +503,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64( , , - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -513,7 +515,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64( %0, %1, - i32 %2) + iXLen 
%2) ret %a } @@ -523,10 +525,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -538,7 +540,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -546,9 +548,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64( , , - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -558,7 +560,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -568,10 +570,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -583,7 +585,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -591,9 +593,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64( , , - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -603,7 +605,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -613,10 +615,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -628,7 +630,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -636,9 +638,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64( , , - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -648,7 +650,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -658,10 +660,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v 
v24, (a0) @@ -674,7 +676,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -682,9 +684,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv1f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -694,7 +696,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv1f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -704,10 +706,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv1f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -719,7 +721,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -727,9 +729,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv2f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -739,7 +741,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv2f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -749,10 +751,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv2f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -764,7 +766,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -772,9 +774,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -784,7 +786,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -794,10 +796,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -809,7 +811,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -817,9 +819,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f16_nxv8f16_f16: ; CHECK: 
# %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -829,7 +831,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -839,10 +841,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -854,7 +856,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -862,9 +864,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -874,7 +876,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -884,10 +886,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -899,7 +901,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -907,9 +909,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv32f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -919,7 +921,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv32f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -929,10 +931,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv32f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -944,7 +946,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -952,9 +954,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -964,7 +966,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -974,10 +976,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { 
+define @intrinsic_vfsgnjx_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -989,7 +991,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -997,9 +999,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1009,7 +1011,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1019,10 +1021,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1034,7 +1036,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1042,9 +1044,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1054,7 +1056,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1064,10 +1066,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1079,7 +1081,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1087,9 +1089,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1099,7 +1101,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1109,10 +1111,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1124,7 +1126,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1132,9 +1134,9 @@ entry: declare 
@llvm.riscv.vfsgnjx.nxv16f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1144,7 +1146,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv16f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1154,10 +1156,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv16f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1169,7 +1171,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1177,9 +1179,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv1f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1189,7 +1191,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv1f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1199,10 +1201,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv1f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1214,7 +1216,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv2f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv2f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1244,10 +1246,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv2f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1259,7 +1261,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1267,9 +1269,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv4f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, 
e64, m4, ta, mu
@@ -1279,7 +1281,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64(
     <vscale x 4 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -1289,10 +1291,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfsgnjx_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfsgnjx_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1304,7 +1306,7 @@ entry:
     <vscale x 4 x double> %1,
     double %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -1312,9 +1314,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1324,7 +1326,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64(
     <vscale x 8 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -1334,10 +1336,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfsgnjx_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfsgnjx_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1349,7 +1351,7 @@ entry:
     <vscale x 8 x double> %1,
     double %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll
deleted file mode 100644
index a0ba31ea266866ea0d2edc27e2ef72a63c98ea0a..0000000000000000000000000000000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll
+++ /dev/null
@@ -1,677 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    half %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-;
CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv2f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv2f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv4f16.f16( - , - half, - i64); - -define @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv4f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv4f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv8f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv16f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a 
= call @llvm.riscv.vfslide1down.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv32f16.f16( - , - half, - i64); - -define @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv32f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv32f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv1f32.f32( - , - float, - i64); - -define @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv4f32.f32( - %0, - float %1, - i64 %2) - - 
ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv4f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv8f32.f32( - , - float, - i64); - -define @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv8f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv8f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv16f32.f32( - , - float, - i64); - -define @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv16f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv16f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv1f64.f64( - , - double, - i64); - -define @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv1f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv1f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare 
@llvm.riscv.vfslide1down.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    double %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    double %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfslide1down.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll
similarity index 85%
rename from llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll
index 5baadc48d857e5ab779b3f08d47e3d23cc1d7ed8..6cbba483a8d9f4269690ce0b8bf5d29d5ec6bc47 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x half> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -116,10 +118,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -131,7 +133,7 @@ entry:
     <vscale x 4 x half> %1,
     half %2,
     <vscale x 4 x i1> %3,
-
i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -229,9 +231,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv32f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv32f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -251,10 +253,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv32f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -266,7 +268,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -274,9 +276,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define 
@intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -286,7 +288,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -296,10 +298,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -311,7 +313,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -319,9 +321,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -331,7 +333,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -341,10 +343,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -356,7 +358,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -364,9 +366,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -376,7 +378,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -386,10 +388,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -401,7 +403,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -409,9 +411,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -421,7 +423,7 @@ entry: %a = call 
@llvm.riscv.vfslide1down.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -431,10 +433,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -446,7 +448,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -454,9 +456,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv16f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -466,7 +468,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv16f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -476,10 +478,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv16f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -491,7 +493,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -499,9 +501,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv1f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -511,7 +513,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv1f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -521,10 +523,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv1f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -536,7 +538,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -544,9 +546,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv2f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -556,7 +558,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv2f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -566,10 +568,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv2f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define 
@intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -581,7 +583,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -589,9 +591,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv4f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -601,7 +603,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv4f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -611,10 +613,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv4f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -626,7 +628,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -634,9 +636,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv8f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -646,7 +648,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv8f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -656,10 +658,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv8f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -671,7 +673,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll deleted file mode 100644 index 4b7d1fe55e2443d6fe2dbc2166e966e7c4521135..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll +++ /dev/null @@ -1,692 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfslide1up.nxv1f16.f16( - , - half, - i64); - -define @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfslide1up.nxv1f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv1f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv2f16.f16( - , - half, - i64); - -define @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv2f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv2f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv4f16.f16( - , - half, - i64); - -define @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 -; CHECK-NEXT: vmv.v.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv4f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv4f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v10, v8, fa0 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv8f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv8f16.f16( - %0, - %1, - half 
%2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfslide1up.vf v12, v8, fa0 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv16f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv32f16.f16( - , - half, - i64); - -define @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfslide1up.vf v16, v8, fa0 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv32f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv32f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv1f32.f32( - , - float, - i64); - -define @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 -; CHECK-NEXT: vmv.v.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a 
-} - -declare @llvm.riscv.vfslide1up.mask.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v10, v8, fa0 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv4f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv4f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv8f32.f32( - , - float, - i64); - -define @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfslide1up.vf v12, v8, fa0 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv8f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv8f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv16f32.f32( - , - float, - i64); - -define @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfslide1up.vf v16, v8, fa0 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv16f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv16f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a 
-} - -declare @llvm.riscv.vfslide1up.nxv1f64.f64( - , - double, - i64); - -define @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 -; CHECK-NEXT: vmv.v.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv1f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv1f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv2f64.f64( - , - double, - i64); - -define @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v10, v8, fa0 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv2f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv2f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv2f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv4f64.f64( - , - double, - i64); - -define @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfslide1up.vf v12, v8, fa0 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv4f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv4f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv4f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv8f64.f64( - , - double, - i64); - -define @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfslide1up.vf v16, v8, fa0 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv8f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare 
<vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll
index 271bf70522bfada7ea0255676f8c4b2c0590f181..695cf7aab3f6cf0b6ea5f84a17aae1b9bee7899c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -17,7 +19,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -27,10 +29,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -50,9 +52,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -63,7 +65,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -73,10 +75,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfslide1up.mask.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -96,9 +98,9 @@
entry: declare @llvm.riscv.vfslide1up.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -109,7 +111,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -119,10 +121,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -134,7 +136,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -142,9 +144,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -155,7 +157,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -165,10 +167,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -180,7 +182,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -188,9 +190,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -201,7 +203,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -211,10 +213,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -226,7 +228,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -234,9 +236,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv32f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -247,7 +249,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv32f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -257,10 +259,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv32f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -272,7 +274,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -280,9 +282,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -293,7 +295,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -303,10 +305,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -318,7 +320,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -326,9 +328,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -339,7 +341,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -349,10 +351,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -364,7 +366,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -372,9 +374,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -385,7 +387,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -395,10 +397,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define 
@intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -410,7 +412,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -418,9 +420,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -431,7 +433,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -441,10 +443,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -456,7 +458,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -464,9 +466,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv16f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -477,7 +479,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv16f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -487,10 +489,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv16f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -502,7 +504,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -510,9 +512,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv1f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -523,7 +525,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv1f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -533,10 +535,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv1f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a0, e64, m1, ta, mu @@ -548,7 +550,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -556,9 +558,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv2f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -569,7 +571,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv2f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -579,10 +581,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv2f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -594,7 +596,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -602,9 +604,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv4f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -615,7 +617,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv4f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -625,10 +627,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv4f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -640,7 +642,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -648,9 +650,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv8f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -661,7 +663,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv8f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -671,10 +673,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv8f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -686,7 +688,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll deleted file mode 100644 index 
3b86fd763f3c9ae6dc70c99e47becc87bea7ff13..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll +++ /dev/null @@ -1,548 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfsqrt.nxv1f16( - , - i32); - -define @intrinsic_vfsqrt_v_nxv1f16_nxv1f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv1f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.mask.nxv1f16( - , - , - , - i32, - i32); - -define @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.mask.nxv1f16( - %1, - %2, - %0, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.nxv2f16( - , - i32); - -define @intrinsic_vfsqrt_v_nxv2f16_nxv2f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv2f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.mask.nxv2f16( - , - , - , - i32, - i32); - -define @intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.mask.nxv2f16( - %1, - %2, - %0, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.nxv4f16( - , - i32); - -define @intrinsic_vfsqrt_v_nxv4f16_nxv4f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv4f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.mask.nxv4f16( - , - , - , - i32, - i32); - -define @intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.mask.nxv4f16( - %1, - %2, - %0, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.nxv8f16( - , - i32); - -define @intrinsic_vfsqrt_v_nxv8f16_nxv8f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv8f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.mask.nxv8f16( - , - , - , - i32, - i32); - -define @intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = 
call @llvm.riscv.vfsqrt.mask.nxv8f16( - %1, - %2, - %0, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.nxv16f16( - , - i32); - -define @intrinsic_vfsqrt_v_nxv16f16_nxv16f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv16f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.mask.nxv16f16( - , - , - , - i32, - i32); - -define @intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.mask.nxv16f16( - %1, - %2, - %0, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.nxv32f16( - , - i32); - -define @intrinsic_vfsqrt_v_nxv32f16_nxv32f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv32f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.nxv1f32( - , - i32); - -define @intrinsic_vfsqrt_v_nxv1f32_nxv1f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv1f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.mask.nxv1f32( - , - , - , - i32, - i32); - -define @intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.mask.nxv1f32( - %1, - %2, - %0, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.nxv2f32( - , - i32); - -define @intrinsic_vfsqrt_v_nxv2f32_nxv2f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv2f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.mask.nxv2f32( - , - , - , - i32, - i32); - -define @intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.mask.nxv2f32( - %1, - %2, - %0, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.nxv4f32( - , - i32); - -define @intrinsic_vfsqrt_v_nxv4f32_nxv4f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv4f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.mask.nxv4f32( - , - , - , - i32, - i32); - -define @intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.mask.nxv4f32( - %1, - %2, - %0, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.nxv8f32( - , - i32); - -define @intrinsic_vfsqrt_v_nxv8f32_nxv8f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv8f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.mask.nxv8f32( - , - , - , - i32, - i32); - -define @intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.mask.nxv8f32( - %1, - %2, - %0, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.nxv16f32( - , - i32); - -define @intrinsic_vfsqrt_v_nxv16f32_nxv16f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv16f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.nxv1f64( - , - i32); - -define @intrinsic_vfsqrt_v_nxv1f64_nxv1f64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv1f64( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.mask.nxv1f64( - , - , - , - i32, - i32); - -define @intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.mask.nxv1f64( - %1, - %2, - %0, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.nxv2f64( - , - i32); - -define @intrinsic_vfsqrt_v_nxv2f64_nxv2f64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv2f64( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.mask.nxv2f64( - , - , - , - i32, - i32); - -define @intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.mask.nxv2f64( - %1, - %2, - %0, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.nxv4f64( - , - i32); - -define @intrinsic_vfsqrt_v_nxv4f64_nxv4f64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv4f64( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.mask.nxv4f64( - , - , - , - i32, - i32); - -define 
<vscale x 4 x double> @intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64(
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64(
-  <vscale x 8 x double>,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfsqrt_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64(
-    <vscale x 8 x double> %0,
-    i32 %1)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll
similarity index 80%
rename from llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll
index c810a516f3b35b94085e20eb72126a96ac115a38..d944375d645cbec80f8aa3835b8493dd6e83d82a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll
@@ -1,22 +1,22 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(
+define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 1 x half> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
     <vscale x 1 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x half> %a
 }
@@ -25,45 +25,39 @@ declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(
+define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 1 x half> %0,
-  <vscale x 1 x half> %1,
-  <vscale x 1 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
 
 declare <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
   <vscale x 2 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfsqrt_v_nxv2f16_nxv2f16(
+define <vscale x 2 x half> @intrinsic_vfsqrt_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 2 x half> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
     <vscale x 2 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x half> %a
 }
@@ -72,45 +66,39 @@ declare <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16(
+define <vscale x 2 x half> @intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 2 x half> %0,
-  <vscale x 2 x half> %1,
-  <vscale x 2 x i1> %2,
-  i64 %3) nounwind {
entry: %a = call @llvm.riscv.vfsqrt.mask.nxv2f16( %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfsqrt.nxv4f16( , - i64); + iXLen); -define @intrinsic_vfsqrt_v_nxv4f16_nxv4f16( +define @intrinsic_vfsqrt_v_nxv4f16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret - %0, - i64 %1) nounwind { entry: %a = call @llvm.riscv.vfsqrt.nxv4f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -119,45 +107,39 @@ declare @llvm.riscv.vfsqrt.mask.nxv4f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16( +define @intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v9, v0.t ; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { entry: %a = call @llvm.riscv.vfsqrt.mask.nxv4f16( %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfsqrt.nxv8f16( , - i64); + iXLen); -define @intrinsic_vfsqrt_v_nxv8f16_nxv8f16( +define @intrinsic_vfsqrt_v_nxv8f16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret - %0, - i64 %1) nounwind { entry: %a = call @llvm.riscv.vfsqrt.nxv8f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -166,45 +148,39 @@ declare @llvm.riscv.vfsqrt.mask.nxv8f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16( +define @intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v10, v0.t ; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { entry: %a = call @llvm.riscv.vfsqrt.mask.nxv8f16( %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfsqrt.nxv16f16( , - i64); + iXLen); -define @intrinsic_vfsqrt_v_nxv16f16_nxv16f16( +define @intrinsic_vfsqrt_v_nxv16f16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret - %0, - i64 %1) nounwind { entry: %a = call @llvm.riscv.vfsqrt.nxv16f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -213,45 +189,39 @@ declare @llvm.riscv.vfsqrt.mask.nxv16f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16( +define @intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v12, v0.t ; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { entry: %a = call @llvm.riscv.vfsqrt.mask.nxv16f16( %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfsqrt.nxv32f16( , - i64); + iXLen); -define @intrinsic_vfsqrt_v_nxv32f16_nxv32f16( +define @intrinsic_vfsqrt_v_nxv32f16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret - %0, - i64 
%1) nounwind { entry: %a = call @llvm.riscv.vfsqrt.nxv32f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -260,45 +230,39 @@ declare @llvm.riscv.vfsqrt.mask.nxv32f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16( +define @intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v16, v0.t ; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { entry: %a = call @llvm.riscv.vfsqrt.mask.nxv32f16( %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfsqrt.nxv1f32( , - i64); + iXLen); -define @intrinsic_vfsqrt_v_nxv1f32_nxv1f32( +define @intrinsic_vfsqrt_v_nxv1f32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret - %0, - i64 %1) nounwind { entry: %a = call @llvm.riscv.vfsqrt.nxv1f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -307,45 +271,39 @@ declare @llvm.riscv.vfsqrt.mask.nxv1f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32( +define @intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v9, v0.t ; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { entry: %a = call @llvm.riscv.vfsqrt.mask.nxv1f32( %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfsqrt.nxv2f32( , - i64); + iXLen); -define @intrinsic_vfsqrt_v_nxv2f32_nxv2f32( +define @intrinsic_vfsqrt_v_nxv2f32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret - %0, - i64 %1) nounwind { entry: %a = call @llvm.riscv.vfsqrt.nxv2f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -354,45 +312,39 @@ declare @llvm.riscv.vfsqrt.mask.nxv2f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32( +define @intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v9, v0.t ; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { entry: %a = call @llvm.riscv.vfsqrt.mask.nxv2f32( %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfsqrt.nxv4f32( , - i64); + iXLen); -define @intrinsic_vfsqrt_v_nxv4f32_nxv4f32( +define @intrinsic_vfsqrt_v_nxv4f32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret - %0, - i64 %1) nounwind { entry: %a = call @llvm.riscv.vfsqrt.nxv4f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -401,45 +353,39 @@ declare @llvm.riscv.vfsqrt.mask.nxv4f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32( +define @intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: 
vfsqrt.v v8, v10, v0.t ; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { entry: %a = call @llvm.riscv.vfsqrt.mask.nxv4f32( %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfsqrt.nxv8f32( , - i64); + iXLen); -define @intrinsic_vfsqrt_v_nxv8f32_nxv8f32( +define @intrinsic_vfsqrt_v_nxv8f32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret - %0, - i64 %1) nounwind { entry: %a = call @llvm.riscv.vfsqrt.nxv8f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -448,45 +394,39 @@ declare @llvm.riscv.vfsqrt.mask.nxv8f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32( +define @intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v12, v0.t ; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { entry: %a = call @llvm.riscv.vfsqrt.mask.nxv8f32( %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfsqrt.nxv16f32( , - i64); + iXLen); -define @intrinsic_vfsqrt_v_nxv16f32_nxv16f32( +define @intrinsic_vfsqrt_v_nxv16f32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret - %0, - i64 %1) nounwind { entry: %a = call @llvm.riscv.vfsqrt.nxv16f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -495,45 +435,39 @@ declare @llvm.riscv.vfsqrt.mask.nxv16f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32( +define @intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v16, v0.t ; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { entry: %a = call @llvm.riscv.vfsqrt.mask.nxv16f32( %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfsqrt.nxv1f64( , - i64); + iXLen); -define @intrinsic_vfsqrt_v_nxv1f64_nxv1f64( +define @intrinsic_vfsqrt_v_nxv1f64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret - %0, - i64 %1) nounwind { entry: %a = call @llvm.riscv.vfsqrt.nxv1f64( %0, - i64 %1) + iXLen %1) ret %a } @@ -542,45 +476,39 @@ declare @llvm.riscv.vfsqrt.mask.nxv1f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64( +define @intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v9, v0.t ; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { entry: %a = call @llvm.riscv.vfsqrt.mask.nxv1f64( %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfsqrt.nxv2f64( , - i64); + iXLen); -define @intrinsic_vfsqrt_v_nxv2f64_nxv2f64( +define @intrinsic_vfsqrt_v_nxv2f64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, 
e64, m2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret - %0, - i64 %1) nounwind { entry: %a = call @llvm.riscv.vfsqrt.nxv2f64( %0, - i64 %1) + iXLen %1) ret %a } @@ -589,45 +517,39 @@ declare @llvm.riscv.vfsqrt.mask.nxv2f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64( +define @intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v10, v0.t ; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { entry: %a = call @llvm.riscv.vfsqrt.mask.nxv2f64( %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfsqrt.nxv4f64( , - i64); + iXLen); -define @intrinsic_vfsqrt_v_nxv4f64_nxv4f64( +define @intrinsic_vfsqrt_v_nxv4f64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret - %0, - i64 %1) nounwind { entry: %a = call @llvm.riscv.vfsqrt.nxv4f64( %0, - i64 %1) + iXLen %1) ret %a } @@ -636,45 +558,39 @@ declare @llvm.riscv.vfsqrt.mask.nxv4f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64( +define @intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v12, v0.t ; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { entry: %a = call @llvm.riscv.vfsqrt.mask.nxv4f64( %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfsqrt.nxv8f64( , - i64); + iXLen); -define @intrinsic_vfsqrt_v_nxv8f64_nxv8f64( +define @intrinsic_vfsqrt_v_nxv8f64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret - %0, - i64 %1) nounwind { entry: %a = call @llvm.riscv.vfsqrt.nxv8f64( %0, - i64 %1) + iXLen %1) ret %a } @@ -683,25 +599,21 @@ declare @llvm.riscv.vfsqrt.mask.nxv8f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfsqrt_mask_v_nxv8f64_nxv8f64( +define @intrinsic_vfsqrt_mask_v_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v16, v0.t ; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { entry: %a = call @llvm.riscv.vfsqrt.mask.nxv8f64( %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll deleted file mode 100644 index 7445cfb806d43bf016b2037d6db73240bb6dc4ce..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll +++ /dev/null @@ -1,1356 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -mattr=+zfh \ -; RUN: -mattr=+d -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfsub.nxv1f16.nxv1f16( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv1f16.nxv1f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv2f16.nxv2f16( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv2f16.nxv2f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv4f16.nxv4f16( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv4f16.nxv4f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv8f16.nxv8f16( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv8f16.nxv8f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsub.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv16f16.nxv16f16( - , - , - i64); - -define 
@intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv16f16.nxv16f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsub.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv32f16.nxv32f16( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv32f16.nxv32f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv1f32.nxv1f32( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv1f32.nxv1f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv2f32.nxv2f32( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv2f32.nxv2f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsub.vv v8, v9, v10, 
v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv4f32.nxv4f32( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv4f32.nxv4f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsub.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv8f32.nxv8f32( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv8f32.nxv8f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsub.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv16f32.nxv16f32( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv16f32.nxv16f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv1f64.nxv1f64( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv1f64.nxv1f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) 
nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv2f64.nxv2f64( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv2f64.nxv2f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsub.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv4f64.nxv4f64( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv4f64.nxv4f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsub.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv8f64.nxv8f64( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv8f64.nxv8f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv1f16.f16( - , - half, - i64); - -define @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv1f16.f16( - %0, - half %1, - i64 %2) - - ret 
%a -} - -declare @llvm.riscv.vfsub.mask.nxv1f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv2f16.f16( - , - half, - i64); - -define @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv2f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv2f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv4f16.f16( - , - half, - i64); - -define @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv4f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv4f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv8f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: 
vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv16f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv32f16.f16( - , - half, - i64); - -define @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv32f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv32f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv1f32.f32( - , - float, - i64); - -define @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) 
nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv4f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv4f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv8f32.f32( - , - float, - i64); - -define @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv8f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv8f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv16f32.f32( - , - float, - i64); - -define @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv16f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv16f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv1f64.f64( - , - double, - i64); - -define @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv1f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv1f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - 
i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv2f64.f64( - , - double, - i64); - -define @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv2f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv2f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv2f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv4f64.f64( - , - double, - i64); - -define @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv4f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv4f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv4f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv8f64.f64( - , - double, - i64); - -define @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv8f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv8f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv8f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub.ll similarity index 86% rename from llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfsub.ll index 86371c1685fc07b504aa8da3c6916a931fdbe8ef..645fb340ffa3098c2d9767f23e4c155e03c7e0b0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs 
-target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfsub.nxv1f16.nxv1f16( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -16,7 +18,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv1f16.nxv1f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -26,10 +28,10 @@ declare @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -41,7 +43,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -49,9 +51,9 @@ entry: declare @llvm.riscv.vfsub.nxv2f16.nxv2f16( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -61,7 +63,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv2f16.nxv2f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -71,10 +73,10 @@ declare @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -86,7 +88,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -94,9 +96,9 @@ entry: declare @llvm.riscv.vfsub.nxv4f16.nxv4f16( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -106,7 +108,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv4f16.nxv4f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -116,10 +118,10 @@ declare @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfsub.nxv8f16.nxv8f16( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv8f16.nxv8f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfsub.nxv16f16.nxv16f16( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv16f16.nxv16f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -229,9 +231,9 @@ entry: declare @llvm.riscv.vfsub.nxv32f16.nxv32f16( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv32f16.nxv32f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -251,10 +253,10 @@ declare @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -267,7 +269,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -275,9 +277,9 @@ entry: declare @llvm.riscv.vfsub.nxv1f32.nxv1f32( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -287,7 +289,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv1f32.nxv1f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -297,10 +299,10 @@ declare @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -312,7 +314,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -320,9 +322,9 @@ entry: declare @llvm.riscv.vfsub.nxv2f32.nxv2f32( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -332,7 +334,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv2f32.nxv2f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -342,10 +344,10 @@ declare @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -357,7 +359,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -365,9 +367,9 @@ entry: declare @llvm.riscv.vfsub.nxv4f32.nxv4f32( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -377,7 +379,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv4f32.nxv4f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -387,10 +389,10 @@ declare @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -402,7 +404,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -410,9 +412,9 @@ entry: declare @llvm.riscv.vfsub.nxv8f32.nxv8f32( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -422,7 +424,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv8f32.nxv8f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -432,10 +434,10 @@ declare @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -447,7 +449,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -455,9 +457,9 @@ entry: declare @llvm.riscv.vfsub.nxv16f32.nxv16f32( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -467,7 +469,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv16f32.nxv16f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -477,10 +479,10 @@ declare @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -493,7 +495,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -501,9 +503,9 @@ entry: declare @llvm.riscv.vfsub.nxv1f64.nxv1f64( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -513,7 +515,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv1f64.nxv1f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -523,10 +525,10 @@ declare @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -538,7 +540,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -546,9 +548,9 @@ entry: declare @llvm.riscv.vfsub.nxv2f64.nxv2f64( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -558,7 +560,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv2f64.nxv2f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -568,10 +570,10 @@ declare @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -583,7 +585,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -591,9 +593,9 @@ entry: declare @llvm.riscv.vfsub.nxv4f64.nxv4f64( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -603,7 +605,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv4f64.nxv4f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -613,10 +615,10 @@ declare @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define 
@intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -628,7 +630,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -636,9 +638,9 @@ entry: declare @llvm.riscv.vfsub.nxv8f64.nxv8f64( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -648,7 +650,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv8f64.nxv8f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -658,10 +660,10 @@ declare @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -674,7 +676,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -682,9 +684,9 @@ entry: declare @llvm.riscv.vfsub.nxv1f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -694,7 +696,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv1f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -704,10 +706,10 @@ declare @llvm.riscv.vfsub.mask.nxv1f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -719,7 +721,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -727,9 +729,9 @@ entry: declare @llvm.riscv.vfsub.nxv2f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -739,7 +741,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv2f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -749,10 +751,10 @@ declare @llvm.riscv.vfsub.mask.nxv2f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -764,7 +766,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -772,9 +774,9 @@ entry: declare @llvm.riscv.vfsub.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define 
@intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -784,7 +786,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -794,10 +796,10 @@ declare @llvm.riscv.vfsub.mask.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -809,7 +811,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -817,9 +819,9 @@ entry: declare @llvm.riscv.vfsub.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -829,7 +831,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -839,10 +841,10 @@ declare @llvm.riscv.vfsub.mask.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -854,7 +856,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -862,9 +864,9 @@ entry: declare @llvm.riscv.vfsub.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -874,7 +876,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -884,10 +886,10 @@ declare @llvm.riscv.vfsub.mask.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -899,7 +901,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -907,9 +909,9 @@ entry: declare @llvm.riscv.vfsub.nxv32f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -919,7 +921,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv32f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -929,10 +931,10 @@ declare @llvm.riscv.vfsub.mask.nxv32f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define 
@intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -944,7 +946,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -952,9 +954,9 @@ entry: declare @llvm.riscv.vfsub.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -964,7 +966,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -974,10 +976,10 @@ declare @llvm.riscv.vfsub.mask.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -989,7 +991,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -997,9 +999,9 @@ entry: declare @llvm.riscv.vfsub.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1009,7 +1011,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1019,10 +1021,10 @@ declare @llvm.riscv.vfsub.mask.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1034,7 +1036,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1042,9 +1044,9 @@ entry: declare @llvm.riscv.vfsub.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1054,7 +1056,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1064,10 +1066,10 @@ declare @llvm.riscv.vfsub.mask.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1079,7 +1081,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1087,9 +1089,9 @@ entry: 
declare @llvm.riscv.vfsub.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1099,7 +1101,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1109,10 +1111,10 @@ declare @llvm.riscv.vfsub.mask.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1124,7 +1126,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1132,9 +1134,9 @@ entry: declare @llvm.riscv.vfsub.nxv16f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1144,7 +1146,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv16f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1154,10 +1156,10 @@ declare @llvm.riscv.vfsub.mask.nxv16f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1169,7 +1171,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1177,9 +1179,9 @@ entry: declare @llvm.riscv.vfsub.nxv1f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1189,7 +1191,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv1f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1199,10 +1201,10 @@ declare @llvm.riscv.vfsub.mask.nxv1f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1214,7 +1216,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vfsub.nxv2f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1234,7 +1236,7 @@ entry: %a = call 
@llvm.riscv.vfsub.nxv2f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1244,10 +1246,10 @@ declare @llvm.riscv.vfsub.mask.nxv2f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1259,7 +1261,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1267,9 +1269,9 @@ entry: declare @llvm.riscv.vfsub.nxv4f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1279,7 +1281,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv4f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1289,10 +1291,10 @@ declare @llvm.riscv.vfsub.mask.nxv4f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1304,7 +1306,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1312,9 +1314,9 @@ entry: declare @llvm.riscv.vfsub.nxv8f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1324,7 +1326,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv8f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1334,10 +1336,10 @@ declare @llvm.riscv.vfsub.mask.nxv8f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1349,7 +1351,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll deleted file mode 100644 index a04b9a54b9306e99c82c38990814ba80c1b239e7..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll +++ /dev/null @@ -1,830 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( - , - , - i64); - -define @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v 
v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16( - , - , - i64); - -define @intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16( - , - , - i64); - -define @intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.vv v10, v8, v9 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16( - , - , - i64); - -define @intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.vv v12, v8, v10 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16( - , - , - i64); - -define @intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.vv v16, v8, v12 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32( - , - , - i64); - -define @intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32( - , - , - i64); - -define @intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.vv v10, v8, v9 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32( - , - , - i64); - -define @intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.vv v12, v8, v10 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32( - , - , - i64); - -define @intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.vv v16, v8, v12 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16( - , - half, - i64); - -define @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.vf v9, v8, fa0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16( - , - half, - i64); - -define @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.vf v9, v8, fa0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i64 
%4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16( - , - half, - i64); - -define @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.vf v10, v8, fa0 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.vf v12, v8, fa0 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.vf v16, v8, fa0 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32( - , - float, - i64); - -define @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.vf v9, v8, fa0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare 
@llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.vf v10, v8, fa0 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.vf v12, v8, fa0 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32( - , - float, - i64); - -define @intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.vf v16, v8, fa0 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll similarity index 86% rename from llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwadd.ll index a3bdcc45732875bed8a50946c846e95344faea69..541f2b8564f280c331fa5b303777423133dfe689 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( , , - i32); + iXLen); -define @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -17,7 +19,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -27,10 +29,10 @@ declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -50,9 +52,9 @@ entry: declare @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16( , , - i32); + iXLen); -define @intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -63,7 +65,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -73,10 +75,10 @@ declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -96,9 +98,9 @@ entry: declare @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16( , , - i32); + iXLen); -define @intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -109,7 +111,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -119,10 +121,10 @@ declare @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16( , , 
, - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -142,9 +144,9 @@ entry: declare @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16( , , - i32); + iXLen); -define @intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -155,7 +157,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -165,10 +167,10 @@ declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -188,9 +190,9 @@ entry: declare @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16( , , - i32); + iXLen); -define @intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -201,7 +203,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -211,10 +213,10 @@ declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16 , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -234,9 +236,9 @@ entry: declare @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32( , , - i32); + iXLen); -define @intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -247,7 +249,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -257,10 +259,10 @@ declare @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, 
ta, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -280,9 +282,9 @@ entry: declare @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32( , , - i32); + iXLen); -define @intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -293,7 +295,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -303,10 +305,10 @@ declare @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -326,9 +328,9 @@ entry: declare @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32( , , - i32); + iXLen); -define @intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -339,7 +341,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -349,10 +351,10 @@ declare @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -372,9 +374,9 @@ entry: declare @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32( , , - i32); + iXLen); -define @intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -385,7 +387,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -395,10 +397,10 @@ declare @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -418,9 +420,9 @@ entry: declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -431,7 +433,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -441,10 +443,10 @@ declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -456,7 +458,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -464,9 +466,9 @@ entry: declare @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -477,7 +479,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -487,10 +489,10 @@ declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -502,7 +504,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -510,9 +512,9 @@ entry: declare @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -523,7 +525,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -533,10 +535,10 @@ declare @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -548,7 +550,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -556,9 +558,9 @@ entry: declare @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -569,7 +571,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -579,10 +581,10 @@ declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define 
@intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -594,7 +596,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -602,9 +604,9 @@ entry: declare @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -615,7 +617,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -625,10 +627,10 @@ declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -640,7 +642,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -648,9 +650,9 @@ entry: declare @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -661,7 +663,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -671,10 +673,10 @@ declare @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -686,7 +688,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -694,9 +696,9 @@ entry: declare @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -707,7 +709,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -717,10 +719,10 @@ declare @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -732,7 +734,7 @@ entry: 
%1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -740,9 +742,9 @@ entry: declare @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -753,7 +755,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -763,10 +765,10 @@ declare @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -778,7 +780,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -786,9 +788,9 @@ entry: declare @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -799,7 +801,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -809,10 +811,10 @@ declare @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -824,7 +826,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll deleted file mode 100644 index 3586ec64e9dfef9c5c314c7d76d8aeeb9e1c96b9..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll +++ /dev/null @@ -1,1248 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( - , - , - i64); - -define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16( - , - , - i64); - -define @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16( - , - , - i64); - -define @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16( - , - , - i64); - -define @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16( - , - , - i64); - -define @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, 
%1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl4re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32( - , - , - i64); - -define @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32( - , - , - i64); - -define @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32( - , - , - i64); - -define @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32( - , - , - i64); - -define @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: 
vfwadd.wv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl4re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv1f32.f16( - , - half, - i64); - -define @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv1f32.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv2f32.f16( - , - half, - i64); - -define @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv2f32.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv4f32.f16( - , - half, - i64); - -define @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv4f32.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare 
@llvm.riscv.vfwadd.w.nxv8f32.f16( - , - half, - i64); - -define @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv8f32.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv16f32.f16( - , - half, - i64); - -define @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv16f32.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv1f64.f32( - , - float, - i64); - -define @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv1f64.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv1f64.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv2f64.f32( - , - float, - i64); - -define @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv2f64.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv2f64.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv4f64.f32( - , - float, - i64); - -define @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv4f64.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv4f64.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv8f64.f32( - , - float, - i64); - -define @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv8f64.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv8f64.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16( - %0, - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16( - %0, - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16( - %0, - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16( - %0, - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16( - %0, - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32( - %0, - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32( - %0, - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32( - %0, - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32( - %0, - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( - %0, - %0, - half %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( - %0, - %0, - half %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; 
CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( - %0, - %0, - half %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( - %0, - %0, - half %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( - %0, - %0, - half %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32( - %0, - %0, - float %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32( - %0, - %0, - float %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32( - %0, - %0, - float %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32( - %0, - %0, - float %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.wv v10, v9, v8 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( - %1, - %0, - i64 %2) - - ret %a -} - -define @intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.wv v10, v9, v8 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16( - %1, - 
%0, - i64 %2) - - ret %a -} - -define @intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.wv v12, v10, v8 -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16( - %1, - %0, - i64 %2) - - ret %a -} - -define @intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.wv v16, v12, v8 -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16( - %1, - %0, - i64 %2) - - ret %a -} - -define @intrinsic_vfwadd.w_wv_untie_nxv1f64_nxv1f64_nxv1f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv1f64_nxv1f64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.wv v10, v9, v8 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32( - %1, - %0, - i64 %2) - - ret %a -} - -define @intrinsic_vfwadd.w_wv_untie_nxv2f64_nxv2f64_nxv2f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv2f64_nxv2f64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.wv v12, v10, v8 -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32( - %1, - %0, - i64 %2) - - ret %a -} - -define @intrinsic_vfwadd.w_wv_untie_nxv4f64_nxv4f64_nxv4f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv4f64_nxv4f64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.wv v16, v12, v8 -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32( - %1, - %0, - i64 %2) - - ret %a -} - -define @intrinsic_vfwadd.w_wv_untie_nxv8f64_nxv8f64_nxv8f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv8f64_nxv8f64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.wv v24, v16, v8 -; CHECK-NEXT: vmv8r.v v8, v24 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32( - %1, - %0, - i64 %2) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll similarity index 88% rename from llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll index 3d046d2ba805794b3ac73427a73875ca5cf45e1d..28cdfbf621b3d513acd73afd0689fead60cd9499 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( , , - i32); + iXLen); -define 
@intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -16,7 +18,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -26,10 +28,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -41,7 +43,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -49,9 +51,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16( , , - i32); + iXLen); -define @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -61,7 +63,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -71,10 +73,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -86,7 +88,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -94,9 +96,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16( , , - i32); + iXLen); -define @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -106,7 +108,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -116,10 +118,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16( , , - i32); + iXLen); -define @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ 
-161,10 +163,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16( , , - i32); + iXLen); -define @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re16.v v24, (a0) @@ -222,7 +224,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -230,9 +232,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32( , , - i32); + iXLen); -define @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -242,7 +244,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -252,10 +254,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -267,7 +269,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -275,9 +277,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32( , , - i32); + iXLen); -define @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -287,7 +289,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -297,10 +299,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -312,7 +314,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -320,9 +322,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32( , , - i32); + iXLen); -define @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -332,7 +334,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -342,10 +344,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -357,7 +359,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -365,9 +367,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32( , , - i32); + iXLen); -define @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -377,7 +379,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -387,10 +389,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re32.v v24, (a0) @@ -403,7 +405,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -411,9 +413,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv1f32.f16( , half, - i32); + iXLen); -define @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -423,7 +425,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv1f32.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -433,10 +435,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -448,7 +450,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -456,9 +458,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv2f32.f16( , half, - i32); + iXLen); -define @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -468,7 +470,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv2f32.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -478,10 +480,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -493,7 +495,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -501,9 +503,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv4f32.f16( , half, - i32); + iXLen); -define @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -513,7 +515,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv4f32.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -523,10 +525,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -538,7 +540,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -546,9 +548,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv8f32.f16( , half, - i32); + iXLen); -define @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -558,7 +560,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv8f32.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -568,10 +570,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -583,7 +585,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -591,9 +593,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv16f32.f16( , half, - i32); + iXLen); -define @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -603,7 +605,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv16f32.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -613,10 +615,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define 
@intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -628,7 +630,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -636,9 +638,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv1f64.f32( , float, - i32); + iXLen); -define @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -648,7 +650,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv1f64.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -658,10 +660,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv1f64.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -673,7 +675,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -681,9 +683,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv2f64.f32( , float, - i32); + iXLen); -define @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -693,7 +695,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv2f64.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -703,10 +705,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv2f64.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -718,7 +720,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -726,9 +728,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv4f64.f32( , float, - i32); + iXLen); -define @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -738,7 +740,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv4f64.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -748,10 +750,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv4f64.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -763,7 +765,7 @@ entry: %1, float %2, %3, 
- i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -771,9 +773,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv8f64.f32( , float, - i32); + iXLen); -define @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -783,7 +785,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv8f64.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -793,10 +795,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv8f64.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -808,12 +810,12 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -825,12 +827,12 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -842,12 +844,12 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -859,12 +861,12 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -876,12 +878,12 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -893,12 +895,12 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -910,12 +912,12 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -927,12 +929,12 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -944,12 +946,12 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -961,12 +963,12 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -978,12 +980,12 @@ entry: %0, half %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -995,12 +997,12 @@ entry: %0, half %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -1012,12 +1014,12 @@ entry: %0, half %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -1029,12 +1031,12 @@ entry: %0, half %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -1046,12 +1048,12 @@ entry: %0, half %1, %2, - i32 %3, i32 
1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -1063,12 +1065,12 @@ entry: %0, float %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1080,12 +1082,12 @@ entry: %0, float %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1097,12 +1099,12 @@ entry: %0, float %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1114,12 +1116,12 @@ entry: %0, float %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -1130,12 +1132,12 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( %1, %0, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -1146,12 +1148,12 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16( %1, %0, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -1162,12 +1164,12 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16( %1, %0, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -1178,12 +1180,12 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16( %1, %0, - i32 %2) + iXLen %2) ret %a } -define 
@intrinsic_vfwadd.w_wv_untie_nxv1f64_nxv1f64_nxv1f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_untie_nxv1f64_nxv1f64_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -1194,12 +1196,12 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32( %1, %0, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vfwadd.w_wv_untie_nxv2f64_nxv2f64_nxv2f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_untie_nxv2f64_nxv2f64_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1210,12 +1212,12 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32( %1, %0, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vfwadd.w_wv_untie_nxv4f64_nxv4f64_nxv4f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_untie_nxv4f64_nxv4f64_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1226,12 +1228,12 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32( %1, %0, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vfwadd.w_wv_untie_nxv8f64_nxv8f64_nxv8f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_untie_nxv8f64_nxv8f64_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1242,7 +1244,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32( %1, %0, - i32 %2) + iXLen %2) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll deleted file mode 100644 index e050090b7761ca0d300b0c6f99ae39649f08788b..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll +++ /dev/null @@ -1,380 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16( - , - i64); - -define @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16( - , - i64); - -define @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16( - , - i64); - -define @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16( - , - i64); - -define @intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16( - , - i64); - -define @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32( - , - i64); - -define @intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: 
intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32( - , - i64); - -define @intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32( - , - i64); - -define @intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32( - , - i64); - -define @intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32( - %0, - %1, 
-    %2,
-    i64 %3, i64 1)
-
-  ret %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
index 460488388b5ea15e5cfd63acf334b1c6108a201a..386fc9a4822abce6022e4635ee00e9853ef05120 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
   ,
-  i32);
+  iXLen);
-define @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16( %0, i32 %1) nounwind {
+define @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16( %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -15,7 +17,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16(
   @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
     %0,
-    i32 %1)
+    iXLen %1)
 
   ret %a
 }
@@ -24,10 +26,10 @@ declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
   ,
   ,
   ,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
-define @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, %1, %2, i32 %3) nounwind {
+define @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -38,16 +40,16 @@ entry:
     %0,
     %1,
     %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret %a
 }
 
 declare @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16(
   ,
-  i32);
+  iXLen);
-define @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16( %0, i32 %1) nounwind {
+define @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16( %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -57,7 +59,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16(
   @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16(
     %0,
-    i32 %1)
+    iXLen %1)
 
   ret %a
 }
@@ -66,10 +68,10 @@ declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16(
   ,
   ,
   ,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
-define @intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16( %0, %1, %2, i32 %3) nounwind {
+define @intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16( %0, %1, %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -80,16 +82,16 @@ entry:
     %0,
     %1,
     %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret %a
 }
 
 declare @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16(
   ,
-  i32);
+  iXLen);
-define @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16( %0, i32 %1) nounwind {
+define @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16( %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -99,7 +101,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16(
   @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16(
     %0,
-    i32 %1)
+    iXLen %1)
 
   ret %a
 }
@@ -108,10 +110,10 @@ declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16(
   ,
   ,
   ,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
-define
@intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16( @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16( @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32( @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32( %0, i32 %1) 
nounwind { +define @intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32( @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32( @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32( @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -374,7 +376,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll deleted file mode 100644 index e294cbe085f736f84acaddc231614aeefca9abbc..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll +++ /dev/null @@ -1,632 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare 
@llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32( - , - , - , - i64, - i64); 
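; The renamed tests in this patch (vfwadd.w and vfwcvt-f-f above, and the
; vfwcvt-f-x rename just below) all converge on the same XLen-agnostic
; layout: the test is written once against the placeholder type iXLen, and
; each RUN line rewrites the placeholder with sed before piping the result
; to llc, so a single file now drives both riscv32 and riscv64 instead of
; two near-identical -rv32/-rv64 copies. A minimal sketch of that layout,
; using the nxv1f16/nxv1i8 conversion from this file: the RUN lines are
; copied from the patch, while the spelled-out <vscale ...> vector types
; (rendered incompletely elsewhere in this diff) are filled in here for
; illustration only.
;
;   ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
;   ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
;   ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
;   ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
;   declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
;     <vscale x 1 x i8>,
;     iXLen);
;
;   define <vscale x 1 x half> @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
;   entry:
;     %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
;       <vscale x 1 x i8> %0,
;       iXLen %1)
;     ret <vscale x 1 x half> %a
;   }
;
; Both RUN lines feed the same unprefixed CHECK block, which works because
; the generated machine code is identical on the two targets; only the
; scalar XLen type in the IR differs after the sed rewrite.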
- -define @intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll index 467b64c20feddaa42b906f8beb530ad58aadacfe..ad4a3a4a5eb6a427044bcd6314505088b3994a70 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -15,7 +17,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8( @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -24,10 +26,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -38,16 +40,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -57,7 +59,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8( @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -66,10 +68,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -80,16 +82,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8( @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare 
@llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8( @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8( @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8( @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16( @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare 
@llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16( @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16( @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -374,16 +376,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -393,7 +395,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16( @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -402,10 +404,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -416,16 +418,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16( , - 
i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -435,7 +437,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16( @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -444,10 +446,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -458,16 +460,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -477,7 +479,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32( @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -486,10 +488,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -500,16 +502,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -519,7 +521,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32( @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -528,10 +530,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -542,16 +544,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -561,7 +563,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32( @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -570,10 +572,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32( , , , - 
i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -584,16 +586,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -603,7 +605,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32( @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -612,10 +614,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -626,7 +628,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll deleted file mode 100644 index 107813b7879bd4c80ecb7d3a968ddcb1db36a257..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll +++ /dev/null @@ -1,632 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8( - %0, - i64 %1) - - ret %a -} - -declare 
@llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32( - %0, - %1, - %2, - i64 %3, 
i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll index fc0af066c39ece336246432d2f0d0388bea731f3..9eef34d4de1e1c84570214031d1822c06ce542be 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -15,7 +17,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8( @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -24,10 +26,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8( %0, %1, %2, i32 %3) nounwind { +define 
@intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -38,16 +40,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -57,7 +59,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8( @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -66,10 +68,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -80,16 +82,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8( @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8( @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8( @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8( @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16( @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16( @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16( %0, %1, %2, i32 %3) nounwind { +define 
@intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16( @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -374,16 +376,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -393,7 +395,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16( @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -402,10 +404,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -416,16 +418,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -435,7 +437,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16( @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -444,10 +446,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -458,16 +460,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32( %0, i32 %1) nounwind { +define 
@intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -477,7 +479,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32( @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -486,10 +488,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -500,16 +502,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -519,7 +521,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32( @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -528,10 +530,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -542,16 +544,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -561,7 +563,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32( @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -570,10 +572,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -584,16 +586,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -603,7 +605,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32( @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -612,10 +614,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32( , , , - i32, - i32); + iXLen, + iXLen); -define 
@intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -626,7 +628,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll deleted file mode 100644 index 4d551f62ec52f595004ba4a0c5e5b7fe066135ab..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll +++ /dev/null @@ -1,380 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16( - , - i64); - -define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16( - , - i64); - -define @intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16( - , - i64); - -define @intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16( - , - i64); - -define @intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16( - , - i64); - -define @intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32( - , - i64); - -define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32( - , - i64); - -define @intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8 -; CHECK-NEXT: vmv2r.v 
v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32( - , - i64); - -define @intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32( - , - i64); - -define @intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll index d6cf1b35631057023b7707f37918ba6c17a58f30..0f7a46aadfd1544bf409ceb1d54fd3afcefec6af 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare 
@llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -15,7 +17,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16( @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -24,10 +26,10 @@ declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -38,16 +40,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -57,7 +59,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16( @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -66,10 +68,10 @@ declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -80,16 +82,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16( @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16( 
@llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16( @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32( @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32( @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32( @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32( @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -374,7 +376,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll deleted file mode 100644 index c419e08471ca1d9d8812084f7eb3c9b2328b4e22..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll +++ /dev/null @@ -1,380 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16( - , - i32); - -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16( - , - , - , - i32, - i32); - -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16( - , - i32); - -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16( - , - , - , - i32, - i32); - -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16( - , - i32); - -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16( - , - , - , - i32, - i32); - -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16( - , - i32); - -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16( - , - , - , - i32, - i32); - -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16( - , - i32); - -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, 
v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16( - , - , - , - i32, - i32); - -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32( - , - i32); - -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32( - , - , - , - i32, - i32); - -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32( - , - i32); - -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32( - , - , - , - i32, - i32); - -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32( - , - i32); - -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32( - , - , - , - i32, - i32); - -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32( - 
%0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32( - , - i32); - -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32( - , - , - , - i32, - i32); - -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll similarity index 86% rename from llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll index 6b881aba8f652bc0b6603a2a7c5c05c33e92c6c6..f3d786a37fbe459bb7bf3beb14eb919db92a3274 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16( , - i64); + iXLen); -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16( %0, i64 %1) nounwind { +define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -15,7 +17,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16( @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -24,10 +26,10 @@ declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -38,16 +40,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16( , - i64); + iXLen); -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16( %0, i64 %1) nounwind { +define @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -57,7 +59,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16( @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -66,10 +68,10 @@ 
declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -80,16 +82,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16( , - i64); + iXLen); -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16( %0, i64 %1) nounwind { +define @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16( @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16( , - i64); + iXLen); -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16( %0, i64 %1) nounwind { +define @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16( @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16( , - i64); + iXLen); -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16( %0, i64 %1) nounwind { +define @intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16( @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32( , - i64); + iXLen); -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32( %0, i64 %1) nounwind { +define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32( @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32( , - i64); + iXLen); -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32( %0, i64 %1) nounwind { +define @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32( @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32( , - i64); + iXLen); -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32( %0, i64 %1) nounwind { +define @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32( @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32( , - i64); + iXLen); -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32( %0, i64 %1) nounwind { +define @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32( @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -374,7 +376,7 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll deleted file mode 100644 index fd01c64df0d36b0fc25a54bc04282dd9a7a10d3b..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll +++ /dev/null @@ -1,380 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16( - , - i64); - -define @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16( - , - i64); - -define @intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16( - , - i64); - -define @intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16( - , - i64); - -define @intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16( - , - i64); - -define @intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32( - , - i64); - -define @intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32( - , - i64); - -define @intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: 
intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32( - , - i64); - -define @intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32( - , - i64); - -define @intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll index b8d88e1c64e55d4a385511ff1e0de6246f5efd91..6c3c2d7702eb1b10625b09acc054586feddeea5b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s 
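The rename hunks above and below collapse each rv32/rv64 test pair into one file: every XLEN-sized scalar operand is spelled with the placeholder type iXLen, and the new RUN lines pipe the file through sed to instantiate it as i32 for riscv32 and i64 for riscv64 before invoking llc, so both targets are checked against one shared set of autogenerated CHECK lines. The RUN lines also stop passing -mattr=+d and instead select the ilp32d/lp64d hard-float ABIs. A minimal sketch of the pattern follows; the signature mirrors this file's unmasked vfwcvt.x.f.v declaration, but the test name "example" is illustrative and not taken from the diff:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
  <vscale x 1 x half>,
  iXLen)

define <vscale x 1 x i32> @example(<vscale x 1 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: example:
; CHECK: vfwcvt.x.f.v
entry:
  ; iXLen is not an LLVM IR type: each RUN line rewrites it to the target's
  ; native integer width (i32 or i64) with sed before llc ever parses the file.
  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
    <vscale x 1 x half> %0,
    iXLen %1)
  ret <vscale x 1 x i32> %a
}

This merge is only possible because, apart from the scalar width of the VL operand, the generated machine code is identical on both targets, which is why the rv32 and rv64 copies of these files could share their CHECK lines verbatim.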
declare @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -15,7 +17,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16( @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -24,10 +26,10 @@ declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -38,16 +40,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -57,7 +59,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16( @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -66,10 +68,10 @@ declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -80,16 +82,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16( @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16( @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare 
@llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16( @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32( @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32( @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare 
@llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32( @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32( @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -374,7 +376,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll deleted file mode 100644 index dc461d60b0be985972b6d1e080d2692b7b1ac61f..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll +++ /dev/null @@ -1,380 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16( - , - i64); - -define @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16( - , - i64); - -define @intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16( %0, i64 
%1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16( - , - i64); - -define @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16( - , - i64); - -define @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16( - , - i64); - -define @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v8, v16, v0.t -; CHECK-NEXT: 
ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32( - , - i64); - -define @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32( - , - i64); - -define @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32( - , - i64); - -define @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32( - , - i64); - -define @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32( - , - , - , - i64, - i64); - -define 
@intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll index c2b0a222709e4f03de7e0d514f633876bf980a4a..10bd22304ed8382096296cf6d8c70447cf47737c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -15,7 +17,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16( @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -24,10 +26,10 @@ declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -38,16 +40,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -57,7 +59,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16( @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -66,10 +68,10 @@ declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -80,16 +82,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16( @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16( @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16( @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32( @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define 
@intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32( @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32( @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32( @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -374,7 +376,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll deleted file mode 100644 index 
eb21c54c18e9061d31cf11049fed9c3c711f0694..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll +++ /dev/null @@ -1,830 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfwmacc.nxv1f32.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv1f32.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv2f32.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv2f32.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv4f32.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v10, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv4f32.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv8f32.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v12, v14 -; CHECK-NEXT: ret -entry: - 
%a = call @llvm.riscv.vfwmacc.nxv8f32.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv16f32.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v16, v20 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv16f32.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv1f64.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv2f64.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v10, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv4f64.nxv4f32( - , - , - , - i64); - -define 
@intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v12, v14 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv8f64.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v16, v20 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv1f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv1f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv1f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv1f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv2f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv2f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv2f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v9, 
v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv2f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv4f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv4f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv4f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv4f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv8f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv8f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv8f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv8f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv16f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv16f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv16f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv16f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv1f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv1f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv1f64.f32( - , - float, - , - , - i64); - -define 
@intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv2f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv2f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv2f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv2f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv4f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv4f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv4f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv8f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv8f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv8f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll similarity index 89% rename from llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll index 
0de121cb3f0024da9eb1e310498dd31305546533..f5db61b5e8c7c8b77325ec61135603d27fa08c3a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwmacc.nxv1f32.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfwmacc.nxv2f32.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfwmacc.nxv4f32.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ 
declare @llvm.riscv.vfwmacc.nxv8f32.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfwmacc.nxv16f32.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfwmacc.nxv1f64.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfwmacc.nxv2f64.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) 
nounwind { +define @intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfwmacc.nxv4f64.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfwmacc.nxv8f64.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfwmacc.nxv1f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -432,7 +434,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv1f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -456,7 +458,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfwmacc.nxv2f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16: ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -478,7 +480,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv2f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -502,7 +504,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfwmacc.nxv4f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -524,7 +526,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv4f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -548,7 +550,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfwmacc.nxv8f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv8f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfwmacc.nxv16f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -616,7 +618,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv16f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i32 
%4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfwmacc.nxv1f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -662,7 +664,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv1f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -686,7 +688,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfwmacc.nxv2f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -708,7 +710,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv2f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -732,7 +734,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfwmacc.nxv4f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -754,7 +756,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv4f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -778,7 +780,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vfwmacc.nxv8f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv8f64.f32( float, , , - i32); + iXLen); -define 
@intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll deleted file mode 100644 index b2e1e235e969555d258189b1c84293d99e859a2e..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll +++ /dev/null @@ -1,830 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfwmsac.nxv1f32.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv1f32.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv2f32.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv2f32.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv4f32.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v10, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv4f32.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; 
CHECK-NEXT: vfwmsac.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv8f32.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v12, v14 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv8f32.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv16f32.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v16, v20 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv16f32.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv1f64.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv2f64.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v10, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32( - , - , - , - , - i64); - 
-define @intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv4f64.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v12, v14 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv8f64.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v16, v20 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv1f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv1f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv1f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv1f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv2f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: 
vfwmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv2f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv2f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv2f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv4f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv4f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv4f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv4f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv8f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv8f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv8f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv8f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv16f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv16f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv16f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv16f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv1f64.f32( - , - float, - , - i64); - -define 
@intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv1f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv1f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv2f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv2f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv2f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv2f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv4f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv4f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv4f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv8f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv8f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv8f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwmsac.vf 
v8, fa0, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32(
-    <vscale x 8 x double> %0,
-    float %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll
similarity index 89%
rename from llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll
index 82c4fad996e7130717bf1d9ad49c7bf85d4a8546..884ee36575b4afa20671ca60a076735f9d31dfbc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x float> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x float> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> 
@intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfwmsac.nxv8f32.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfwmsac.nxv16f32.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfwmsac.nxv1f64.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfwmsac.nxv2f64.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, 
%2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfwmsac.nxv4f64.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfwmsac.nxv8f64.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfwmsac.nxv1f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -432,7 +434,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv1f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, 
tu, mu @@ -456,7 +458,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfwmsac.nxv2f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -478,7 +480,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv2f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -502,7 +504,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfwmsac.nxv4f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -524,7 +526,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv4f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -548,7 +550,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfwmsac.nxv8f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv8f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfwmsac.nxv16f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -616,7 +618,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv16f32.f16( half, , 
, - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfwmsac.nxv1f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -662,7 +664,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv1f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -686,7 +688,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfwmsac.nxv2f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -708,7 +710,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv2f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -732,7 +734,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfwmsac.nxv4f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -754,7 +756,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv4f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -778,7 +780,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vfwmsac.nxv8f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { 
+define @intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv8f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll deleted file mode 100644 index 670c79975a2e053659d7a6c13be4d18b2d9d0aba..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll +++ /dev/null @@ -1,830 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s -declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( - , - , - i32); - -define @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwmul.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16( - , - , - i32); - -define @intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwmul.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16( - , - , - i32); - -define @intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwmul.vv v10, v8, v9 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwmul.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16( - , - , - i32); - -define @intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwmul.vv v12, v8, v10 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwmul.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16( - , - , - i32); - -define @intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwmul.vv v16, v8, v12 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwmul.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32( - , - , - i32); - -define @intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwmul.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, 
ta, mu -; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32( - , - , - i32); - -define @intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwmul.vv v10, v8, v9 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwmul.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32( - , - , - i32); - -define @intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwmul.vv v12, v8, v10 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwmul.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32( - , - , - i32); - -define @intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwmul.vv v16, v8, v12 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwmul.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16( - , - half, - i32); - -define @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwmul.vf v9, v8, fa0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: 
ret -entry: - %a = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16( - , - half, - i32); - -define @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwmul.vf v9, v8, fa0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16( - , - half, - i32); - -define @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwmul.vf v10, v8, fa0 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16( - , - half, - i32); - -define @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwmul.vf v12, v8, fa0 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16( - %0, - %1, - 
half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16( - , - half, - i32); - -define @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwmul.vf v16, v8, fa0 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32( - , - float, - i32); - -define @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwmul.vf v9, v8, fa0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32( - , - float, - i32); - -define @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwmul.vf v10, v8, fa0 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32( - , - float, - i32); - -define @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwmul.vf v12, v8, fa0 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare 
<vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwmul.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x float> %1,
-    float %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwmul.vf v16, v8, fa0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwmul.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    float %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwmul.ll
index fc7d8dcb59e3112c245c6d9320f44056fa31875a..b1ec8464047e1e707d8f3d7d3267751403f90e7b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -17,7 +19,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -27,10 +29,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -50,9 +52,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x float> 
@intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -63,7 +65,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -73,10 +75,10 @@ declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -96,9 +98,9 @@ entry: declare @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16( , , - i64); + iXLen); -define @intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -109,7 +111,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -119,10 +121,10 @@ declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -142,9 +144,9 @@ entry: declare @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16( , , - i64); + iXLen); -define @intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -155,7 +157,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -165,10 +167,10 @@ declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -188,9 +190,9 @@ entry: declare @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16( , , - i64); + iXLen); -define @intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -201,7 +203,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16( %0, %1, 
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -211,10 +213,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -234,9 +236,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -247,7 +249,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -257,10 +259,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -280,9 +282,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -293,7 +295,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -303,10 +305,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32(
   <vscale x 2 x double>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -326,9 +328,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -339,7 +341,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -349,10 +351,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32(
   <vscale x 4 x double>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: 
intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -372,9 +374,9 @@ entry: declare @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32( , , - i64); + iXLen); -define @intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -385,7 +387,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -395,10 +397,10 @@ declare @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -418,9 +420,9 @@ entry: declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -431,7 +433,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -441,10 +443,10 @@ declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -456,7 +458,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -464,9 +466,9 @@ entry: declare @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -477,7 +479,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -487,10 +489,10 @@ declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -502,7 +504,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -510,9 +512,9 @@ entry: declare @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16( %0, half %1, i64 
%2) nounwind { +define @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -523,7 +525,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -533,10 +535,10 @@ declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -548,7 +550,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -556,9 +558,9 @@ entry: declare @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -569,7 +571,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -579,10 +581,10 @@ declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -594,7 +596,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -602,9 +604,9 @@ entry: declare @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -615,7 +617,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -625,10 +627,10 @@ declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -640,7 +642,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -648,9 +650,9 @@ entry: declare @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -661,7 +663,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ 
-671,10 +673,10 @@ declare @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -686,7 +688,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -694,9 +696,9 @@ entry: declare @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -707,7 +709,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -717,10 +719,10 @@ declare @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -732,7 +734,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -740,9 +742,9 @@ entry: declare @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -753,7 +755,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -763,10 +765,10 @@ declare @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -778,7 +780,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -786,9 +788,9 @@ entry: declare @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -799,7 +801,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -809,10 +811,10 @@ declare @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -824,7 +826,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll deleted file mode 100644 index ff2b40cfac2cdadc5d170aaeb8523d6d1678b7e6..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll +++ /dev/null @@ -1,830 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v10, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16( - %0, - %1, - %2, - %3, - i64 
%4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v12, v14 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v16, v20 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v10, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v12, v14 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v16, v20 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv1f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv1f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv1f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv1f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv2f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfwnmacc.nxv2f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv2f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv2f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv4f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv4f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv4f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv4f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv8f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv8f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv8f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv8f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv16f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv16f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv16f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv16f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv1f64.f32( - , - float, - , - i64); - -define 
@intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv1f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv1f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv2f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv2f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv2f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv4f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv4f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv4f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv4f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv8f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv8f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv8f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, 
m4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv8f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll similarity index 89% rename from llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll index 02842609f56856eb63d5e14c7ed041f267f149fa..4ccd0f8c55835529d16a78e00b629d96c393bb99 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16( , 
, , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { 
+define @intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfwnmacc.nxv1f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -432,7 +434,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv1f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -456,7 +458,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfwnmacc.nxv2f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -478,7 +480,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv2f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -502,7 +504,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfwnmacc.nxv4f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -524,7 +526,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv4f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -548,7 +550,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfwnmacc.nxv8f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv8f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfwnmacc.nxv16f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ 
-616,7 +618,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv16f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfwnmacc.nxv1f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -662,7 +664,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv1f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -686,7 +688,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfwnmacc.nxv2f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -708,7 +710,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv2f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -732,7 +734,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfwnmacc.nxv4f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -754,7 +756,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv4f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -778,7 +780,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ 
-787,9 +789,9 @@ declare @llvm.riscv.vfwnmacc.nxv8f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv8f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll deleted file mode 100644 index 2fe370bb1d82f4929b2a5a651811d61b704b31fa..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll +++ /dev/null @@ -1,830 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; 
CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v10, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v12, v14 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v16, v20 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v9, v10, v0.t -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v10, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v12, v14 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v16, v20 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv1f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv1f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv1f32.f16( - , - half, - , - , - i64); - -define 
@intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv1f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv2f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv2f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv2f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv2f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv4f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv4f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv4f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv4f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv8f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv8f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv8f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv8f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv16f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, 
mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv16f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv16f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv16f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv1f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv1f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv1f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv2f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv2f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv2f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv2f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv4f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv4f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv4f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv4f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare 
@llvm.riscv.vfwnmsac.nxv8f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv8f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv8f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv8f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll similarity index 89% rename from llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll index fe9683ed15adba776d454b3797329a06fda9b18b..26fcb06d89167201ebe49dd7674c5700204e75bc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind 
{ ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, 
tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare 
@llvm.riscv.vfwnmsac.nxv1f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -432,7 +434,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv1f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -456,7 +458,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfwnmsac.nxv2f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -478,7 +480,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv2f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -502,7 +504,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfwnmsac.nxv4f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -524,7 +526,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv4f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -548,7 +550,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfwnmsac.nxv8f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv8f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half 
%1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfwnmsac.nxv16f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -616,7 +618,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv16f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfwnmsac.nxv1f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -662,7 +664,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv1f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -686,7 +688,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfwnmsac.nxv2f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -708,7 +710,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv2f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -732,7 +734,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfwnmsac.nxv4f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32( %0, 
float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -754,7 +756,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv4f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -778,7 +780,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vfwnmsac.nxv8f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv8f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll deleted file mode 100644 index 2282bd5fbc8609dc9173e1b75e083d37fd4cdcaa..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll +++ /dev/null @@ -1,508 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfwredosum.nxv2f32.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.nxv2f32.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredosum.nxv2f32.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, 
v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.nxv2f32.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredosum.nxv2f32.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.nxv2f32.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredosum.nxv2f32.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.nxv2f32.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredosum.nxv2f32.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.nxv2f32.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredosum.nxv2f32.nxv32f16( - , - , - , - i64); - -define @intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.nxv2f32.nxv32f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16( - , - , - , - , - i64); - -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredosum.nxv1f64.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredosum.nxv1f64.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.nxv1f64.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredosum.nxv1f64.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.nxv1f64.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare 
@llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredosum.nxv1f64.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.nxv1f64.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredosum.nxv1f64.nxv16f32( - , - , - , - i64); - -define @intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.nxv1f64.nxv16f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32( - , - , - , - , - i64); - -define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll similarity index 88% rename from llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll index 37240159bd908fc944f0d0e85dbdc10dec2bb45f..7eb16dd4b8a80dd77fad5e893c9885a6b1549fa3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwredosum.nxv2f32.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define 
@intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.nxv2f32 , , , - i32); + iXLen); -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfwredosum.nxv2f32.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.nxv2f32 , , , - i32); + iXLen); -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfwredosum.nxv2f32.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.nxv2f32 , , , - i32); + iXLen); -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfwredosum.nxv2f32.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.nxv2f32 , , , - i32); + iXLen); -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfwredosum.nxv2f32.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.nxv2f3 , , , - i32); + iXLen); -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfwredosum.nxv2f32.nxv32f16( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfwredosum.nxv1f64.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.nxv1f6 , , , - i32); + iXLen); -define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfwredosum.nxv1f64.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.nxv1f6 , , , - i32); + iXLen); -define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfwredosum.nxv1f64.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.nxv1f6 , , , - i32); + iXLen); -define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfwredosum.nxv1f64.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.nxv1f6 , , , - i32); + iXLen); -define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfwredosum.nxv1f64.nxv16f32( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.nxv1f , , , - i32); + iXLen); -define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv64.ll deleted file mode 100644 index 52bde877eb7d1a3f3b5ae50393719a26c28dc592..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv64.ll +++ /dev/null @@ -1,508 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfwredusum.nxv2f32.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.nxv2f32.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredusum.nxv2f32.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.nxv2f32.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredusum.nxv2f32.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.nxv2f32.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredusum.nxv2f32.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.nxv2f32.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredusum.nxv2f32.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.nxv2f32.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredusum.nxv2f32.nxv32f16( - , - , - , - i64); - -define @intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.nxv2f32.nxv32f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16( - , - , - , - , - i64); - -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredusum.nxv1f64.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.nxv1f64.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare 
@llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredusum.nxv1f64.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.nxv1f64.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredusum.nxv1f64.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.nxv1f64.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredusum.nxv1f64.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.nxv1f64.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredusum.nxv1f64.nxv16f32( - , - , - , - i64); - 
-define @intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.nxv1f64.nxv16f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32( - , - , - , - , - i64); - -define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll similarity index 88% rename from llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll index fe56d0c6bd0d0bb1babdb799088cc6cf66101dd9..897cd61fb437a623a2747d752db85d21698d63e8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwredusum.nxv2f32.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.nxv2f32 , , , - i32); + iXLen); -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfwredusum.nxv2f32.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.nxv2f32 , , , - i32); + iXLen); -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { 
; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfwredusum.nxv2f32.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.nxv2f32 , , , - i32); + iXLen); -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfwredusum.nxv2f32.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.nxv2f32 , , , - i32); + iXLen); -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfwredusum.nxv2f32.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.nxv2f3 , , , - i32); + iXLen); -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfwredusum.nxv2f32.nxv32f16( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32: ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfwredusum.nxv1f64.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.nxv1f6 , , , - i32); + iXLen); -define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfwredusum.nxv1f64.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.nxv1f6 , , , - i32); + iXLen); -define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfwredusum.nxv1f64.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.nxv1f6 , , , - i32); + iXLen); -define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
@@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfwredusum.nxv1f64.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.nxv1f6 , , , - i32); + iXLen); -define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfwredusum.nxv1f64.nxv16f32( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.nxv1f , , , - i32); + iXLen); -define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16( - , - , - i64); - -define @intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16( - , - , - i64); - -define @intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.vv v10, v8, v9 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16( - , - , - i64); - -define @intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.vv v12, v8, v10 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16( - , - , - i64); - -define @intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.vv v16, v8, v12 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare 
@llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32( - , - , - i64); - -define @intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32( - , - , - i64); - -define @intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.vv v10, v8, v9 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32( - , - , - i64); - -define @intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwsub.vv v12, v8, v10 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwsub.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare 
@llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32( - , - , - i64); - -define @intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwsub.vv v16, v8, v12 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwsub.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16( - , - half, - i64); - -define @intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.vf v9, v8, fa0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16( - , - half, - i64); - -define @intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.vf v9, v8, fa0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16( - , - half, - i64); - -define @intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.vf v10, v8, fa0 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16( - , - , - half, - , - i64, - i64); - -define 
@intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.vf v12, v8, fa0 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.vf v16, v8, fa0 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32( - , - float, - i64); - -define @intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.vf v9, v8, fa0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32( %0, float %1, 
i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.vf v10, v8, fa0 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwsub.vf v12, v8, fa0 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32( - , - float, - i64); - -define @intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwsub.vf v16, v8, fa0 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll similarity index 86% rename from llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwsub.ll index e81121f848ddbadc995540166e9c92412fe61f82..916abcae0de0dacc2193d13aa477be822dba3aba 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ 
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16( , , - i32); + iXLen); -define @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -17,7 +19,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -27,10 +29,10 @@ declare @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -50,9 +52,9 @@ entry: declare @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16( , , - i32); + iXLen); -define @intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -63,7 +65,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -73,10 +75,10 @@ declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -96,9 +98,9 @@ entry: declare @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16( , , - i32); + iXLen); -define @intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -109,7 +111,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -119,10 +121,10 @@ declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -142,9 +144,9 @@ entry: declare @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16( , , - i32); + iXLen); -define @intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +define 
@intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -155,7 +157,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -165,10 +167,10 @@ declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -188,9 +190,9 @@ entry: declare @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16( , , - i32); + iXLen); -define @intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -201,7 +203,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -211,10 +213,10 @@ declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16 , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -234,9 +236,9 @@ entry: declare @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32( , , - i32); + iXLen); -define @intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -247,7 +249,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -257,10 +259,10 @@ declare @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -280,9 +282,9 @@ entry: declare @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32( , , - i32); + iXLen); -define @intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -293,7 +295,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -303,10 +305,10 @@ declare 
@llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -326,9 +328,9 @@ entry: declare @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32( , , - i32); + iXLen); -define @intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -339,7 +341,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -349,10 +351,10 @@ declare @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -372,9 +374,9 @@ entry: declare @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32( , , - i32); + iXLen); -define @intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -385,7 +387,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -395,10 +397,10 @@ declare @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -418,9 +420,9 @@ entry: declare @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -431,7 +433,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -441,10 +443,10 @@ declare @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a0, e16, mf4, ta, mu @@ -456,7 +458,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -464,9 +466,9 @@ entry: declare @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -477,7 +479,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -487,10 +489,10 @@ declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -502,7 +504,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -510,9 +512,9 @@ entry: declare @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -523,7 +525,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -533,10 +535,10 @@ declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -548,7 +550,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -556,9 +558,9 @@ entry: declare @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -569,7 +571,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -579,10 +581,10 @@ declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -594,7 +596,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -602,9 +604,9 @@ entry: declare @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16( %0, half %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -615,7 +617,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -625,10 +627,10 @@ declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -640,7 +642,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -648,9 +650,9 @@ entry: declare @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -661,7 +663,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -671,10 +673,10 @@ declare @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -686,7 +688,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -694,9 +696,9 @@ entry: declare @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -707,7 +709,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -717,10 +719,10 @@ declare @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -732,7 +734,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -740,9 +742,9 @@ entry: declare @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -753,7 +755,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -763,10 +765,10 @@ declare @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32( , 
float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -778,7 +780,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -786,9 +788,9 @@ entry: declare @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -799,7 +801,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -809,10 +811,10 @@ declare @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -824,7 +826,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll deleted file mode 100644 index da2290be93d27ea48d3d32c73f1ae6dd5d07bae9..0000000000000000000000000000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll +++ /dev/null @@ -1,1248 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s -declare @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( - , - , - i32); - -define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16( - , - , - i32); - -define @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16( - , - , - , - , - i32, - i32); - -define 
@intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16( - , - , - i32); - -define @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16( - , - , - i32); - -define @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16( - , - , - i32); - -define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl4re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32( - , - , - i32); - -define @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32( - , - , - i32); - -define @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32( - , - , - i32); - -define @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32( - , - , - i32); - -define @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl4re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32( - %0, - %1, 
- %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv1f32.f16( - , - half, - i32); - -define @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv1f32.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv1f32.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv2f32.f16( - , - half, - i32); - -define @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv2f32.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv2f32.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv4f32.f16( - , - half, - i32); - -define @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv4f32.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv4f32.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv8f32.f16( - , - half, - i32); - -define @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv8f32.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv8f32.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv16f32.f16( - , - half, - i32); - -define @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv16f32.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv16f32.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv1f64.f32( - , - float, - i32); - -define @intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv1f64.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv1f64.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv2f64.f32( - , - float, - i32); - -define @intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv2f64.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv2f64.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv4f64.f32( - , - float, - i32); - -define @intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfwsub.w.nxv4f64.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv4f64.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv8f64.f32( - , - float, - i32); - -define @intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv8f64.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv8f64.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -define @intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16( - %0, - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16( - %0, - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16( - %0, - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16( - %0, - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v16, v0.t -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16( - %0, - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32( - %0, - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32( - %0, - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32( - %0, - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32( - %0, - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16( - %0, - %0, - half %1, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.f16( - %0, - %0, - half %1, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.f16( - %0, - %0, - half %1, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.f16( - %0, - %0, - half %1, - %2, - i32 %3, i32 1) - - ret %a -} - 
-define @intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.f16( - %0, - %0, - half %1, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32( - %0, - %0, - float %1, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32( - %0, - %0, - float %1, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32( - %0, - %0, - float %1, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32( - %0, - %0, - float %1, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.wv v10, v9, v8 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( - %1, - %0, - i32 %2) - - ret %a -} - -define @intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.wv v10, v9, v8 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16( - %1, - %0, - i32 %2) - - ret %a -} - -define @intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.wv v12, v10, v8 -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16( - %1, - %0, - i32 %2) - - ret %a -} - -define @intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8f16: 
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v16, v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16(
-    <vscale x 8 x float> %1,
-    <vscale x 8 x half> %0,
-    i32 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-define <vscale x 1 x double> @intrinsic_vfwsub.w_wv_untie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv1f64_nxv1f64_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %1,
-    <vscale x 1 x float> %0,
-    i32 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-define <vscale x 2 x double> @intrinsic_vfwsub.w_wv_untie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv2f64_nxv2f64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wv v12, v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(
-    <vscale x 2 x double> %1,
-    <vscale x 2 x float> %0,
-    i32 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-define <vscale x 4 x double> @intrinsic_vfwsub.w_wv_untie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv4f64_nxv4f64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v16, v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32(
-    <vscale x 4 x double> %1,
-    <vscale x 4 x float> %0,
-    i32 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-define <vscale x 8 x double> @intrinsic_vfwsub.w_wv_untie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv8f64_nxv8f64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wv v24, v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32(
-    <vscale x 8 x double> %1,
-    <vscale x 8 x float> %0,
-    i32 %2)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll
similarity index 88%
rename from llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll
index ec0bd527dafe6666adb692aefeba3aaa9eb2de19..b5d008c3e1ed5881e98e0b4852db3e996f911dc8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define
@intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -41,7 +43,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -49,9 +51,9 @@ entry: declare @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16( , , - i64); + iXLen); -define @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -61,7 +63,7 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -71,10 +73,10 @@ declare @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -86,7 +88,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -94,9 +96,9 @@ entry: declare @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16( , , - i64); + iXLen); -define @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -106,7 +108,7 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -116,10 +118,10 @@ declare @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16( , , - i64); + iXLen); -define @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16( , , - i64); + iXLen); 
-define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re16.v v24, (a0) @@ -222,7 +224,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -230,9 +232,9 @@ entry: declare @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32( , , - i64); + iXLen); -define @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -242,7 +244,7 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -252,10 +254,10 @@ declare @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -267,7 +269,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -275,9 +277,9 @@ entry: declare @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32( , , - i64); + iXLen); -define @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -287,7 +289,7 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -297,10 +299,10 @@ declare @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -312,7 +314,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -320,9 +322,9 @@ entry: declare @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32( , , - i64); + iXLen); -define @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -332,7 +334,7 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32( %0, %1, - i64 %2) 
+ iXLen %2) ret %a } @@ -342,10 +344,10 @@ declare @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -357,7 +359,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -365,9 +367,9 @@ entry: declare @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32( , , - i64); + iXLen); -define @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -377,7 +379,7 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -387,10 +389,10 @@ declare @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re32.v v24, (a0) @@ -403,7 +405,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -411,9 +413,9 @@ entry: declare @llvm.riscv.vfwsub.w.nxv1f32.f16( , half, - i64); + iXLen); -define @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -423,7 +425,7 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv1f32.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -433,10 +435,10 @@ declare @llvm.riscv.vfwsub.w.mask.nxv1f32.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -448,7 +450,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -456,9 +458,9 @@ entry: declare @llvm.riscv.vfwsub.w.nxv2f32.f16( , half, - i64); + iXLen); -define @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -468,7 +470,7 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv2f32.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -478,10 +480,10 @@ declare @llvm.riscv.vfwsub.w.mask.nxv2f32.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -493,7 +495,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -501,9 +503,9 @@ entry: declare @llvm.riscv.vfwsub.w.nxv4f32.f16( , half, - i64); + iXLen); -define @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -513,7 +515,7 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv4f32.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -523,10 +525,10 @@ declare @llvm.riscv.vfwsub.w.mask.nxv4f32.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -538,7 +540,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -546,9 +548,9 @@ entry: declare @llvm.riscv.vfwsub.w.nxv8f32.f16( , half, - i64); + iXLen); -define @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -558,7 +560,7 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv8f32.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -568,10 +570,10 @@ declare @llvm.riscv.vfwsub.w.mask.nxv8f32.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -583,7 +585,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -591,9 +593,9 @@ entry: declare @llvm.riscv.vfwsub.w.nxv16f32.f16( , half, - i64); + iXLen); -define @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -603,7 +605,7 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv16f32.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -613,10 +615,10 @@ declare @llvm.riscv.vfwsub.w.mask.nxv16f32.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -628,7 +630,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -636,9 +638,9 @@ entry: declare @llvm.riscv.vfwsub.w.nxv1f64.f32( , float, - i64); + iXLen); -define @intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32( %0, 
float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -648,7 +650,7 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv1f64.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -658,10 +660,10 @@ declare @llvm.riscv.vfwsub.w.mask.nxv1f64.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -673,7 +675,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -681,9 +683,9 @@ entry: declare @llvm.riscv.vfwsub.w.nxv2f64.f32( , float, - i64); + iXLen); -define @intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -693,7 +695,7 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv2f64.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -703,10 +705,10 @@ declare @llvm.riscv.vfwsub.w.mask.nxv2f64.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -718,7 +720,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -726,9 +728,9 @@ entry: declare @llvm.riscv.vfwsub.w.nxv4f64.f32( , float, - i64); + iXLen); -define @intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -738,7 +740,7 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv4f64.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -748,10 +750,10 @@ declare @llvm.riscv.vfwsub.w.mask.nxv4f64.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -763,7 +765,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -771,9 +773,9 @@ entry: declare @llvm.riscv.vfwsub.w.nxv8f64.f32( , float, - i64); + iXLen); -define @intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -783,7 +785,7 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv8f64.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -793,10 +795,10 @@ declare @llvm.riscv.vfwsub.w.mask.nxv8f64.f32( , float, , - i64, - i64); 
+ iXLen, + iXLen); -define @intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -808,12 +810,12 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } -define @intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -825,12 +827,12 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -842,12 +844,12 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -859,12 +861,12 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -876,12 +878,12 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -893,12 +895,12 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -910,12 +912,12 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -927,12 +929,12 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, i64 %3) nounwind { 
+define @intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -944,12 +946,12 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -961,12 +963,12 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -978,12 +980,12 @@ entry: %0, half %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -995,12 +997,12 @@ entry: %0, half %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -1012,12 +1014,12 @@ entry: %0, half %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -1029,12 +1031,12 @@ entry: %0, half %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -1046,12 +1048,12 @@ entry: %0, half %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -1063,12 +1065,12 @@ entry: %0, float %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32( %0, float %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1080,12 +1082,12 @@ entry: %0, float %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1097,12 +1099,12 @@ entry: %0, float %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1114,12 +1116,12 @@ entry: %0, float %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -1130,12 +1132,12 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( %1, %0, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -1146,12 +1148,12 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16( %1, %0, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -1162,12 +1164,12 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16( %1, %0, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -1178,12 +1180,12 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16( %1, %0, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfwsub.w_wv_untie_nxv1f64_nxv1f64_nxv1f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_untie_nxv1f64_nxv1f64_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -1194,12 +1196,12 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32( %1, %0, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfwsub.w_wv_untie_nxv2f64_nxv2f64_nxv2f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_untie_nxv2f64_nxv2f64_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfwsub.w_wv_untie_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1210,12 +1212,12 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32( %1, %0, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfwsub.w_wv_untie_nxv4f64_nxv4f64_nxv4f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_untie_nxv4f64_nxv4f64_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1226,12 +1228,12 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32( %1, %0, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfwsub.w_wv_untie_nxv8f64_nxv8f64_nxv8f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwsub.w_wv_untie_nxv8f64_nxv8f64_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1242,7 +1244,7 @@ entry: %a = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32( %1, %0, - i64 %2) + iXLen %2) ret %a }
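
The rename and RUN-line hunks above implement the iXLen convention that motivates this patch: the separate rv32 and rv64 copies of the test collapse into one file, every XLEN-sized scalar in the IR is spelled with the placeholder iXLen, and each RUN line pipes the file through sed to specialize the placeholder to i32 or i64 before llc parses it, so a single set of autogenerated CHECK lines covers both targets. A minimal self-contained test in this style would look as follows (a hypothetical sample, not a file from this patch; the @sample name is invented, while the vfwsub.w declare mirrors the signatures used throughout the diff above):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

; Unmasked vfwsub.w: wide result, wide operand, narrow operand, XLEN-sized vl.
declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x float> @sample(<vscale x 1 x float> %w, <vscale x 1 x half> %n, iXLen %vl) nounwind {
; CHECK-LABEL: sample:
; CHECK: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK: vfwsub.wv v8, v8, v9
entry:
  ; %vl is i32 under the first RUN line and i64 under the second; the
  ; generated code is the same either way, so one CHECK prefix suffices.
  %r = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
    <vscale x 1 x float> %w,
    <vscale x 1 x half> %n,
    iXLen %vl)
  ret <vscale x 1 x float> %r
}

Because the substitution happens in the pipe, iXLen never needs to be a legal IR type, and the same trick extends to any intrinsic whose only XLEN dependence is the type of its vl (and policy) operands.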