From 0b799791807e6b23a568526484f6cdaf0984cf02 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Sat, 22 Jan 2022 23:05:19 -0800
Subject: [PATCH] [RISCV] Merge some rvv intrinsic test cases that only differ
 by XLen type.

Instead of having a test for i32 XLen and i64 XLen, use sed to replace
iXLen with i32/i64 before running llc.

This change covers all of the floating point tests.
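As an illustration, each merged test now carries a pair of RUN lines like
the following (a sketch taken from the new vfadd.ll; the exact -mattr and
ABI flags vary per test), with iXLen used as a placeholder type that sed
rewrites before llc ever parses the file:

    ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
    ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
    ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
    ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

    declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
      <vscale x 1 x half>,
      <vscale x 1 x half>,
      iXLen);

This works because the generated code is identical on both targets apart
from the XLen scalar type, so a single set of CHECK lines serves both RUN
configurations.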
---
 llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll | 1356 -----------------
 .../RISCV/rvv/{vfadd-rv32.ll => vfadd.ll} | 426 +++---
 llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll | 692 ---------
 .../RISCV/rvv/{vfclass-rv32.ll => vfclass.ll} | 186 +--
 llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll | 617 --------
 .../rvv/{vfcvt-f-x-rv32.ll => vfcvt-f-x.ll} | 216 +--
 .../test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll | 617 --------
 .../rvv/{vfcvt-f-xu-rv32.ll => vfcvt-f-xu.ll} | 216 +--
 .../CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll | 617 --------
 ...vfcvt-rtz-x-f-rv64.ll => vfcvt-rtz-x-f.ll} | 216 +--
 .../CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll | 617 --------
 ...cvt-rtz-xu-f-rv32.ll => vfcvt-rtz-xu-f.ll} | 216 +--
 llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll | 617 --------
 .../rvv/{vfcvt-x-f-rv32.ll => vfcvt-x-f.ll} | 216 +--
 .../test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll | 617 --------
 .../rvv/{vfcvt-xu-f-rv32.ll => vfcvt-xu-f.ll} | 216 +--
 llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll | 1355 ----------------
 .../RISCV/rvv/{vfdiv-rv32.ll => vfdiv.ll} | 426 +++---
 llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll | 1106 --------------
 .../RISCV/rvv/{vfmacc-rv64.ll => vfmacc.ll} | 294 ++--
 llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll | 1106 --------------
 .../RISCV/rvv/{vfmadd-rv32.ll => vfmadd.ll} | 294 ++--
 llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll | 1355 ----------------
 .../RISCV/rvv/{vfmax-rv64.ll => vfmax.ll} | 426 +++---
 llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll | 902 -----------
 .../RISCV/rvv/{vfmerge-rv64.ll => vfmerge.ll} | 246 +--
 llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll | 1355 ----------------
 .../RISCV/rvv/{vfmin-rv64.ll => vfmin.ll} | 426 +++---
 llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll | 1106 --------------
 .../RISCV/rvv/{vfmsac-rv32.ll => vfmsac.ll} | 294 ++--
 llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll | 1106 --------------
 .../RISCV/rvv/{vfmsub-rv32.ll => vfmsub.ll} | 294 ++--
 llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll | 1355 ----------------
 .../RISCV/rvv/{vfmul-rv32.ll => vfmul.ll} | 426 +++---
 llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll | 197 ---
 .../rvv/{vfmv.s.f-rv32.ll => vfmv.s.f.ll} | 95 +-
 llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll | 482 ------
 .../rvv/{vfmv.v.f-rv32.ll => vfmv.v.f.ll} | 156 +-
 .../test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll | 380 -----
 .../rvv/{vfncvt-f-f-rv32.ll => vfncvt-f-f.ll} | 132 +-
 .../test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll | 380 -----
 .../rvv/{vfncvt-f-x-rv32.ll => vfncvt-f-x.ll} | 132 +-
 .../CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll | 380 -----
 .../{vfncvt-f-xu-rv32.ll => vfncvt-f-xu.ll} | 132 +-
 .../CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll | 380 -----
 ...ncvt-rod-f-f-rv32.ll => vfncvt-rod-f-f.ll} | 132 +-
 .../CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll | 632 --------
 ...ncvt-rtz-x-f-rv32.ll => vfncvt-rtz-x-f.ll} | 216 +--
 .../CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll | 632 --------
 ...vt-rtz-xu-f-rv32.ll => vfncvt-rtz-xu-f.ll} | 216 +--
 .../test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll | 632 --------
 .../rvv/{vfncvt-x-f-rv32.ll => vfncvt-x-f.ll} | 216 +--
 .../CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll | 632 --------
 .../{vfncvt-xu-f-rv32.ll => vfncvt-xu-f.ll} | 216 +--
 llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll | 1106 --------------
 .../RISCV/rvv/{vfnmacc-rv32.ll => vfnmacc.ll} | 294 ++--
 llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll | 1106 --------------
 .../RISCV/rvv/{vfnmadd-rv32.ll => vfnmadd.ll} | 294 ++--
 llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll | 1106 --------------
 .../RISCV/rvv/{vfnmsac-rv32.ll => vfnmsac.ll} | 294 ++--
 llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll | 1106 --------------
 .../RISCV/rvv/{vfnmsub-rv64.ll => vfnmsub.ll} | 294 ++--
 llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll | 677 --------
 .../RISCV/rvv/{vfrdiv-rv32.ll => vfrdiv.ll} | 216 +--
 llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll | 617 --------
 .../RISCV/rvv/{vfrec7-rv32.ll => vfrec7.ll} | 216 +--
 llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll | 692 ---------
 .../rvv/{vfredmax-rv32.ll => vfredmax.ll} | 186 +--
 llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll | 692 ---------
 .../rvv/{vfredmin-rv32.ll => vfredmin.ll} | 186 +--
 llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll | 692 ---------
 .../rvv/{vfredosum-rv32.ll => vfredosum.ll} | 186 +--
 llvm/test/CodeGen/RISCV/rvv/vfredusum-rv64.ll | 692 ---------
 .../rvv/{vfredusum-rv32.ll => vfredusum.ll} | 186 +--
 llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll | 617 --------
 .../rvv/{vfrsqrt7-rv32.ll => vfrsqrt7.ll} | 216 +--
 llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll | 678 ---------
 .../RISCV/rvv/{vfrsub-rv32.ll => vfrsub.ll} | 216 +--
 llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll | 1355 ----------------
 .../RISCV/rvv/{vfsgnj-rv32.ll => vfsgnj.ll} | 426 +++---
 llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll | 1355 ----------------
 .../RISCV/rvv/{vfsgnjn-rv32.ll => vfsgnjn.ll} | 426 +++---
 llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll | 1355 ----------------
 .../RISCV/rvv/{vfsgnjx-rv32.ll => vfsgnjx.ll} | 426 +++---
 .../CodeGen/RISCV/rvv/vfslide1down-rv64.ll | 677 --------
 .../{vfslide1down-rv32.ll => vfslide1down.ll} | 216 +--
 .../test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll | 692 ---------
 .../rvv/{vfslide1up-rv32.ll => vfslide1up.ll} | 216 +--
 llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll | 548 -------
 .../RISCV/rvv/{vfsqrt-rv64.ll => vfsqrt.ll} | 306 ++--
 llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll | 1356 -----------------
 .../RISCV/rvv/{vfsub-rv32.ll => vfsub.ll} | 426 +++---
 llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll | 830 ----------
 .../RISCV/rvv/{vfwadd-rv32.ll => vfwadd.ll} | 258 ++--
 llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll | 1248 ---------------
 .../rvv/{vfwadd.w-rv32.ll => vfwadd.w.ll} | 362 ++---
 .../test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll | 380 -----
 .../rvv/{vfwcvt-f-f-rv32.ll => vfwcvt-f-f.ll} | 132 +-
 .../test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll | 632 --------
 .../rvv/{vfwcvt-f-x-rv32.ll => vfwcvt-f-x.ll} | 216 +--
 .../CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll | 632 --------
 .../{vfwcvt-f-xu-rv32.ll => vfwcvt-f-xu.ll} | 216 +--
 .../CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll | 380 -----
 ...wcvt-rtz-x-f-rv32.ll => vfwcvt-rtz-x-f.ll} | 132 +-
 .../CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll | 380 -----
 ...vt-rtz-xu-f-rv64.ll => vfwcvt-rtz-xu-f.ll} | 132 +-
 .../test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll | 380 -----
 .../rvv/{vfwcvt-x-f-rv32.ll => vfwcvt-x-f.ll} | 132 +-
 .../CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll | 380 -----
 .../{vfwcvt-xu-f-rv32.ll => vfwcvt-xu-f.ll} | 132 +-
 llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll | 830 ----------
 .../RISCV/rvv/{vfwmacc-rv32.ll => vfwmacc.ll} | 222 +--
 llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll | 830 ----------
 .../RISCV/rvv/{vfwmsac-rv32.ll => vfwmsac.ll} | 222 +--
 llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll | 830 ----------
 .../RISCV/rvv/{vfwmul-rv64.ll => vfwmul.ll} | 258 ++--
 llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll | 830 ----------
 .../rvv/{vfwnmacc-rv32.ll => vfwnmacc.ll} | 222 +--
 llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll | 830 ----------
 .../rvv/{vfwnmsac-rv32.ll => vfwnmsac.ll} | 222 +--
 .../test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll | 508 ------
 .../rvv/{vfwredosum-rv32.ll => vfwredosum.ll} | 138 +-
 .../test/CodeGen/RISCV/rvv/vfwredusum-rv64.ll | 508 ------
 .../rvv/{vfwredusum-rv32.ll => vfwredusum.ll} | 138 +-
 llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll | 830 ----------
 .../RISCV/rvv/{vfwsub-rv32.ll => vfwsub.ll} | 258 ++--
 llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll | 1248 ---------------
 .../rvv/{vfwsub.w-rv64.ll => vfwsub.w.ll} | 362 ++---
 128 files changed, 7848 insertions(+), 58637 deletions(-)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfadd-rv32.ll => vfadd.ll} (86%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfclass-rv32.ll => vfclass.ll} (91%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfcvt-f-x-rv32.ll => vfcvt-f-x.ll} (87%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfcvt-f-xu-rv32.ll => vfcvt-f-xu.ll} (87%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfcvt-rtz-x-f-rv64.ll => vfcvt-rtz-x-f.ll} (87%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfcvt-rtz-xu-f-rv32.ll => vfcvt-rtz-xu-f.ll} (87%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfcvt-x-f-rv32.ll => vfcvt-x-f.ll} (87%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfcvt-xu-f-rv32.ll => vfcvt-xu-f.ll} (87%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfdiv-rv32.ll => vfdiv.ll} (86%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfmacc-rv64.ll => vfmacc.ll} (89%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfmadd-rv32.ll => vfmadd.ll} (89%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfmax-rv64.ll => vfmax.ll} (86%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfmerge-rv64.ll => vfmerge.ll} (88%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfmin-rv64.ll => vfmin.ll} (86%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfmsac-rv32.ll => vfmsac.ll} (89%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfmsub-rv32.ll => vfmsub.ll} (89%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfmul-rv32.ll => vfmul.ll} (86%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfmv.s.f-rv32.ll => vfmv.s.f.ll} (74%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfmv.v.f-rv32.ll => vfmv.v.f.ll} (82%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfncvt-f-f-rv32.ll => vfncvt-f-f.ll} (86%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfncvt-f-x-rv32.ll => vfncvt-f-x.ll} (87%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfncvt-f-xu-rv32.ll => vfncvt-f-xu.ll} (87%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfncvt-rod-f-f-rv32.ll => vfncvt-rod-f-f.ll} (86%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfncvt-rtz-x-f-rv32.ll => vfncvt-rtz-x-f.ll} (87%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfncvt-rtz-xu-f-rv32.ll => vfncvt-rtz-xu-f.ll} (87%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfncvt-x-f-rv32.ll => vfncvt-x-f.ll} (87%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfncvt-xu-f-rv32.ll => vfncvt-xu-f.ll} (87%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfnmacc-rv32.ll => vfnmacc.ll} (90%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfnmadd-rv32.ll => vfnmadd.ll} (90%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfnmsac-rv32.ll => vfnmsac.ll} (90%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfnmsub-rv64.ll => vfnmsub.ll} (90%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfrdiv-rv32.ll => vfrdiv.ll} (86%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfrec7-rv32.ll => vfrec7.ll} (87%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfredmax-rv32.ll => vfredmax.ll} (88%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfredmin-rv32.ll => vfredmin.ll} (88%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfredosum-rv32.ll => vfredosum.ll} (88%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfredusum-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfredusum-rv32.ll => vfredusum.ll} (88%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfrsqrt7-rv32.ll => vfrsqrt7.ll} (87%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfrsub-rv32.ll => vfrsub.ll} (86%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfsgnj-rv32.ll => vfsgnj.ll} (86%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfsgnjn-rv32.ll => vfsgnjn.ll} (86%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfsgnjx-rv32.ll => vfsgnjx.ll} (86%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfslide1down-rv32.ll => vfslide1down.ll} (85%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfslide1up-rv32.ll => vfslide1up.ll} (86%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfsqrt-rv64.ll => vfsqrt.ll} (80%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfsub-rv32.ll => vfsub.ll} (86%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfwadd-rv32.ll => vfwadd.ll} (86%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfwadd.w-rv32.ll => vfwadd.w.ll} (88%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfwcvt-f-f-rv32.ll => vfwcvt-f-f.ll} (86%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfwcvt-f-x-rv32.ll => vfwcvt-f-x.ll} (87%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfwcvt-f-xu-rv32.ll => vfwcvt-f-xu.ll} (87%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfwcvt-rtz-x-f-rv32.ll => vfwcvt-rtz-x-f.ll} (87%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfwcvt-rtz-xu-f-rv64.ll => vfwcvt-rtz-xu-f.ll} (86%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfwcvt-x-f-rv32.ll => vfwcvt-x-f.ll} (87%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfwcvt-xu-f-rv32.ll => vfwcvt-xu-f.ll} (87%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfwmacc-rv32.ll => vfwmacc.ll} (89%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfwmsac-rv32.ll => vfwmsac.ll} (89%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfwmul-rv64.ll => vfwmul.ll} (86%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfwnmacc-rv32.ll => vfwnmacc.ll} (89%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfwnmsac-rv32.ll => vfwnmsac.ll} (89%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfwredosum-rv32.ll => vfwredosum.ll} (88%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfwredusum-rv32.ll => vfwredusum.ll} (88%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfwsub-rv32.ll => vfwsub.ll} (86%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vfwsub.w-rv64.ll => vfwsub.w.ll} (88%)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll
deleted file mode 100644
index b91d86befbc8..000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll
+++ /dev/null
@@ -1,1356 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -mattr=+zfh \
-; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 32 x half> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x float> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    half %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    half %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  half,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    half %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
-  <vscale x 8 x half>,
-  half,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  half,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    half %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
-  <vscale x 16 x half>,
-  half,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  half,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    half %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
-  <vscale x 32 x half>,
-  half,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  half,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    half %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
-  <vscale x 1 x float>,
-  float,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  float,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    float %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
-  <vscale x 2 x float>,
-  float,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  float,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    float %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
-  <vscale x 4 x float>,
-  float,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    float %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    float %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
-  <vscale x 16 x float>,
-  float,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  float,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    float %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
-  <vscale x 1 x double>,
-  double,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  double,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    double %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    double %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    double %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfadd.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfadd.ll
index 5df1881bffb2..041580b2b49d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 
 declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -116,10 +118,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -131,7 +133,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -139,9 +141,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -151,7 +153,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -161,10 +163,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -176,7 +178,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -184,9 +186,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -196,7 +198,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -206,10 +208,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -221,7 +223,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -229,9 +231,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -241,7 +243,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -251,10 +253,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -267,7 +269,7 @@ entry:
     <vscale x 32 x half> %1,
     <vscale x 32 x half> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -275,9 +277,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -287,7 +289,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -297,10 +299,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -312,7 +314,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -320,9 +322,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -332,7 +334,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -342,10 +344,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -357,7 +359,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -365,9 +367,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -377,7 +379,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -387,10 +389,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -402,7 +404,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -410,9 +412,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -422,7 +424,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -432,10 +434,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -447,7 +449,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -455,9 +457,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -467,7 +469,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -477,10 +479,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -493,7 +495,7 @@ entry:
     <vscale x 16 x float> %1,
     <vscale x 16 x float> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -501,9 +503,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -513,7 +515,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -523,10 +525,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -538,7 +540,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -546,9 +548,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -558,7 +560,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -568,10 +570,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -583,7 +585,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -591,9 +593,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -603,7 +605,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -613,10 +615,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -628,7 +630,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -636,9 +638,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -648,7 +650,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -658,10 +660,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -674,7 +676,7 @@ entry:
     <vscale x 8 x double> %1,
     <vscale x 8 x double> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -682,9 +684,9 @@ entry:
 declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -694,7 +696,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -704,10 +706,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -719,7 +721,7 @@ entry:
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -727,9 +729,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -739,7 +741,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -749,10 +751,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -764,7 +766,7 @@ entry:
     <vscale x 2 x half> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -772,9 +774,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta,
mu @@ -784,7 +786,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -794,10 +796,10 @@ declare @llvm.riscv.vfadd.mask.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -809,7 +811,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -817,9 +819,9 @@ entry: declare @llvm.riscv.vfadd.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -829,7 +831,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -839,10 +841,10 @@ declare @llvm.riscv.vfadd.mask.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -854,7 +856,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -862,9 +864,9 @@ entry: declare @llvm.riscv.vfadd.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -874,7 +876,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -884,10 +886,10 @@ declare @llvm.riscv.vfadd.mask.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -899,7 +901,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -907,9 +909,9 @@ entry: declare @llvm.riscv.vfadd.nxv32f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -919,7 +921,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv32f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -929,10 +931,10 @@ declare @llvm.riscv.vfadd.mask.nxv32f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -944,7 +946,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -952,9 +954,9 @@ entry: declare @llvm.riscv.vfadd.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -964,7 +966,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -974,10 +976,10 @@ declare @llvm.riscv.vfadd.mask.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -989,7 +991,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -997,9 +999,9 @@ entry: declare @llvm.riscv.vfadd.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1009,7 +1011,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1019,10 +1021,10 @@ declare @llvm.riscv.vfadd.mask.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1034,7 +1036,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1042,9 +1044,9 @@ entry: declare @llvm.riscv.vfadd.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1054,7 +1056,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1064,10 +1066,10 @@ declare @llvm.riscv.vfadd.mask.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1079,7 +1081,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1087,9 +1089,9 @@ entry: declare @llvm.riscv.vfadd.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32( %0, 
float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1099,7 +1101,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1109,10 +1111,10 @@ declare @llvm.riscv.vfadd.mask.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1124,7 +1126,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1132,9 +1134,9 @@ entry: declare @llvm.riscv.vfadd.nxv16f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1144,7 +1146,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv16f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1154,10 +1156,10 @@ declare @llvm.riscv.vfadd.mask.nxv16f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1169,7 +1171,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1177,9 +1179,9 @@ entry: declare @llvm.riscv.vfadd.nxv1f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1189,7 +1191,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv1f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1199,10 +1201,10 @@ declare @llvm.riscv.vfadd.mask.nxv1f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1214,7 +1216,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vfadd.nxv2f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vfadd.nxv2f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1244,10 +1246,10 @@ declare @llvm.riscv.vfadd.mask.nxv2f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define 
<vscale x 2 x double> @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -1259,7 +1261,7 @@ entry:
     <vscale x 2 x double> %1,
     double %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -1267,9 +1269,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1279,7 +1281,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
     <vscale x 4 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -1289,10 +1291,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -1304,7 +1306,7 @@ entry:
     <vscale x 4 x double> %1,
     double %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -1312,9 +1314,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1324,7 +1326,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
     <vscale x 8 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -1334,10 +1336,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -1349,7 +1351,7 @@ entry:
     <vscale x 8 x double> %1,
     double %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll
deleted file mode 100644
index c86e1f334712..000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll
+++ /dev/null
@@ -1,692 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vfclass_v_nxv1i16_nxv1f16(
-; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    ret
-  <vscale x 1 x half> %0,
-  i64 %1) nounwind {
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
-    <vscale x 1 x half> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(
-; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero,
a0, e16, mf4, tu, mu -; CHECK-NEXT: vfclass.v v8, v9, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv1i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv2i16( - , - i64); - -define @intrinsic_vfclass_v_nxv2i16_nxv2f16( -; CHECK-LABEL: intrinsic_vfclass_v_nxv2i16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv2i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv2i16( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv2i16_nxv2f16( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfclass.v v8, v9, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv2i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv4i16( - , - i64); - -define @intrinsic_vfclass_v_nxv4i16_nxv4f16( -; CHECK-LABEL: intrinsic_vfclass_v_nxv4i16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv4i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv4i16( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv4i16_nxv4f16( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfclass.v v8, v9, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv4i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv8i16( - , - i64); - -define @intrinsic_vfclass_v_nxv8i16_nxv8f16( -; CHECK-LABEL: intrinsic_vfclass_v_nxv8i16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv8i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv8i16( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv8i16_nxv8f16( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfclass.v v8, v10, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv8i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv16i16( - , - i64); - -define @intrinsic_vfclass_v_nxv16i16_nxv16f16( -; CHECK-LABEL: intrinsic_vfclass_v_nxv16i16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv16i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv16i16( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv16i16_nxv16f16( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfclass.v v8, v12, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv16i16( - %0, - %1, - %2, - i64 %3) - - ret %a 
-} - -declare @llvm.riscv.vfclass.nxv32i16( - , - i64); - -define @intrinsic_vfclass_v_nxv32i16_nxv32f16( -; CHECK-LABEL: intrinsic_vfclass_v_nxv32i16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv32i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv32i16( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv32i16_nxv32f16( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfclass.v v8, v16, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv32i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv1i32( - , - i64); - -define @intrinsic_vfclass_v_nxv1i32_nxv1f32( -; CHECK-LABEL: intrinsic_vfclass_v_nxv1i32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv1i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv1i32( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv1i32_nxv1f32( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfclass.v v8, v9, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv1i32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv2i32( - , - i64); - -define @intrinsic_vfclass_v_nxv2i32_nxv2f32( -; CHECK-LABEL: intrinsic_vfclass_v_nxv2i32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv2i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv2i32( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv2i32_nxv2f32( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfclass.v v8, v9, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv2i32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv4i32( - , - i64); - -define @intrinsic_vfclass_v_nxv4i32_nxv4f32( -; CHECK-LABEL: intrinsic_vfclass_v_nxv4i32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv4i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv4i32( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv4i32_nxv4f32( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfclass.v v8, v10, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv4i32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv8i32( - , - i64); - -define @intrinsic_vfclass_v_nxv8i32_nxv8f32( -; CHECK-LABEL: intrinsic_vfclass_v_nxv8i32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv8i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv8i32( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv8i32_nxv8f32( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfclass.v v8, v12, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv8i32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv16i32( - , - i64); - -define @intrinsic_vfclass_v_nxv16i32_nxv16f32( -; CHECK-LABEL: intrinsic_vfclass_v_nxv16i32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv16i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv16i32( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv16i32_nxv16f32( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfclass.v v8, v16, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv16i32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv1i64( - , - i64); - -define @intrinsic_vfclass_v_nxv1i64_nxv1f64( -; CHECK-LABEL: intrinsic_vfclass_v_nxv1i64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv1i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv1i64( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv1i64_nxv1f64( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfclass.v v8, v9, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv1i64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv2i64( - , - i64); - -define @intrinsic_vfclass_v_nxv2i64_nxv2f64( -; CHECK-LABEL: intrinsic_vfclass_v_nxv2i64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv2i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfclass.mask.nxv2i64( - , - , - , - i64); - -define @intrinsic_vfclass_mask_v_nxv2i64_nxv2f64( -; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfclass.v v8, v10, v0.t -; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { -entry: - %a = call @llvm.riscv.vfclass.mask.nxv2i64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfclass.nxv4i64( - , - i64); - -define @intrinsic_vfclass_v_nxv4i64_nxv4f64( -; CHECK-LABEL: intrinsic_vfclass_v_nxv4i64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfclass.v v8, v8 -; CHECK-NEXT: ret - %0, - i64 %1) nounwind { -entry: - %a = call @llvm.riscv.vfclass.nxv4i64( - %0, - i64 %1) - - ret %a -} - -declare 
<vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vfclass_mask_v_nxv4i64_nxv4f64(
-; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfclass.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-  <vscale x 4 x i64> %0,
-  <vscale x 4 x double> %1,
-  <vscale x 4 x i1> %2,
-  i64 %3) nounwind {
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8i64(
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vfclass_v_nxv8i64_nxv8f64(
-; CHECK-LABEL: intrinsic_vfclass_v_nxv8i64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    ret
-  <vscale x 8 x double> %0,
-  i64 %1) nounwind {
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8i64(
-    <vscale x 8 x double> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vfclass_mask_v_nxv8i64_nxv8f64(
-; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vfclass.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-  <vscale x 8 x i64> %0,
-  <vscale x 8 x double> %1,
-  <vscale x 8 x i1> %2,
-  i64 %3) nounwind {
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass.ll
similarity index 91%
rename from llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfclass.ll
index ae9df2aefa4d..e6aa39831095 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfclass.ll
@@ -1,9 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
 define <vscale x 1 x i16> @intrinsic_vfclass_v_nxv1i16_nxv1f16(
 ; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16:
@@ -12,11 +14,11 @@ define <vscale x 1 x i16> @intrinsic_vfclass_v_nxv1i16_nxv1f16(
 ; CHECK-NEXT:    vfclass.v v8, v8
 ; CHECK-NEXT:    ret
   <vscale x 1 x half> %0,
-  i32 %1) nounwind {
+  iXLen %1) nounwind {
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
     <vscale x 1 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -25,7 +27,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
 define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(
 ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16:
@@ -36,20 +38,20 @@ define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(
   <vscale x 1 x i16> %0,
   <vscale x 1 x half> %1,
   <vscale x 1 x i1> %2,
-  i32 %3) nounwind {
+  iXLen %3) nounwind {
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i16> %a
 }
 
 declare <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16(
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
 define <vscale x 2 x i16> @intrinsic_vfclass_v_nxv2i16_nxv2f16(
 ; CHECK-LABEL: intrinsic_vfclass_v_nxv2i16_nxv2f16:
@@ -58,11 +60,11 @@ define <vscale x 2 x i16> @intrinsic_vfclass_v_nxv2i16_nxv2f16(
 ; CHECK-NEXT:    vfclass.v v8, v8
 ; CHECK-NEXT:    ret
   <vscale x 2 x half> %0,
-  i32 %1) nounwind {
+  iXLen %1) nounwind {
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16(
     <vscale x 2 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -71,7 +73,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
 define <vscale x 2 x i16> @intrinsic_vfclass_mask_v_nxv2i16_nxv2f16(
 ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2f16:
@@ -82,20 +84,20 @@ define <vscale x 2 x i16>
@intrinsic_vfclass_mask_v_nxv2i16_nxv2f16( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv2i16( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv4i16( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv4i16_nxv4f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv4i16_nxv4f16: @@ -104,11 +106,11 @@ define @intrinsic_vfclass_v_nxv4i16_nxv4f16( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv4i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -117,7 +119,7 @@ declare @llvm.riscv.vfclass.mask.nxv4i16( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv4i16_nxv4f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i16_nxv4f16: @@ -128,20 +130,20 @@ define @intrinsic_vfclass_mask_v_nxv4i16_nxv4f16( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv4i16( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv8i16( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv8i16_nxv8f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv8i16_nxv8f16: @@ -150,11 +152,11 @@ define @intrinsic_vfclass_v_nxv8i16_nxv8f16( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv8i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -163,7 +165,7 @@ declare @llvm.riscv.vfclass.mask.nxv8i16( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv8i16_nxv8f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8f16: @@ -174,20 +176,20 @@ define @intrinsic_vfclass_mask_v_nxv8i16_nxv8f16( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv8i16( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv16i16( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv16i16_nxv16f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv16i16_nxv16f16: @@ -196,11 +198,11 @@ define @intrinsic_vfclass_v_nxv16i16_nxv16f16( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv16i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -209,7 +211,7 @@ declare @llvm.riscv.vfclass.mask.nxv16i16( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv16i16_nxv16f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16f16: @@ -220,20 +222,20 @@ define @intrinsic_vfclass_mask_v_nxv16i16_nxv16f16( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv16i16( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv32i16( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv32i16_nxv32f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv32i16_nxv32f16: @@ -242,11 +244,11 @@ define @intrinsic_vfclass_v_nxv32i16_nxv32f16( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv32i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -255,7 +257,7 @@ declare @llvm.riscv.vfclass.mask.nxv32i16( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv32i16_nxv32f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32f16: @@ -266,20 +268,20 @@ define @intrinsic_vfclass_mask_v_nxv32i16_nxv32f16( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv32i16( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv1i32( , - i32); + iXLen); define 
@intrinsic_vfclass_v_nxv1i32_nxv1f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv1i32_nxv1f32: @@ -288,11 +290,11 @@ define @intrinsic_vfclass_v_nxv1i32_nxv1f32( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv1i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -301,7 +303,7 @@ declare @llvm.riscv.vfclass.mask.nxv1i32( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv1i32_nxv1f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i32_nxv1f32: @@ -312,20 +314,20 @@ define @intrinsic_vfclass_mask_v_nxv1i32_nxv1f32( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv1i32( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv2i32( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv2i32_nxv2f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv2i32_nxv2f32: @@ -334,11 +336,11 @@ define @intrinsic_vfclass_v_nxv2i32_nxv2f32( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv2i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -347,7 +349,7 @@ declare @llvm.riscv.vfclass.mask.nxv2i32( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv2i32_nxv2f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i32_nxv2f32: @@ -358,20 +360,20 @@ define @intrinsic_vfclass_mask_v_nxv2i32_nxv2f32( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv2i32( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv4i32( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv4i32_nxv4f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv4i32_nxv4f32: @@ -380,11 +382,11 @@ define @intrinsic_vfclass_v_nxv4i32_nxv4f32( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv4i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -393,7 +395,7 @@ declare @llvm.riscv.vfclass.mask.nxv4i32( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv4i32_nxv4f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i32_nxv4f32: @@ -404,20 +406,20 @@ define @intrinsic_vfclass_mask_v_nxv4i32_nxv4f32( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv4i32( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv8i32( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv8i32_nxv8f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv8i32_nxv8f32: @@ -426,11 +428,11 @@ define @intrinsic_vfclass_v_nxv8i32_nxv8f32( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv8i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -439,7 +441,7 @@ declare @llvm.riscv.vfclass.mask.nxv8i32( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv8i32_nxv8f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i32_nxv8f32: @@ -450,20 +452,20 @@ define @intrinsic_vfclass_mask_v_nxv8i32_nxv8f32( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv8i32( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv16i32( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv16i32_nxv16f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv16i32_nxv16f32: @@ -472,11 +474,11 @@ define @intrinsic_vfclass_v_nxv16i32_nxv16f32( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call 
@llvm.riscv.vfclass.nxv16i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -485,7 +487,7 @@ declare @llvm.riscv.vfclass.mask.nxv16i32( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv16i32_nxv16f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i32_nxv16f32: @@ -496,20 +498,20 @@ define @intrinsic_vfclass_mask_v_nxv16i32_nxv16f32( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv16i32( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv1i64( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv1i64_nxv1f64( ; CHECK-LABEL: intrinsic_vfclass_v_nxv1i64_nxv1f64: @@ -518,11 +520,11 @@ define @intrinsic_vfclass_v_nxv1i64_nxv1f64( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv1i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -531,7 +533,7 @@ declare @llvm.riscv.vfclass.mask.nxv1i64( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv1i64_nxv1f64( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i64_nxv1f64: @@ -542,20 +544,20 @@ define @intrinsic_vfclass_mask_v_nxv1i64_nxv1f64( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv1i64( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv2i64( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv2i64_nxv2f64( ; CHECK-LABEL: intrinsic_vfclass_v_nxv2i64_nxv2f64: @@ -564,11 +566,11 @@ define @intrinsic_vfclass_v_nxv2i64_nxv2f64( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv2i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -577,7 +579,7 @@ declare @llvm.riscv.vfclass.mask.nxv2i64( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv2i64_nxv2f64( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i64_nxv2f64: @@ -588,20 +590,20 @@ define @intrinsic_vfclass_mask_v_nxv2i64_nxv2f64( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv2i64( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv4i64( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv4i64_nxv4f64( ; CHECK-LABEL: intrinsic_vfclass_v_nxv4i64_nxv4f64: @@ -610,11 +612,11 @@ define @intrinsic_vfclass_v_nxv4i64_nxv4f64( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv4i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -623,7 +625,7 @@ declare @llvm.riscv.vfclass.mask.nxv4i64( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv4i64_nxv4f64( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i64_nxv4f64: @@ -634,20 +636,20 @@ define @intrinsic_vfclass_mask_v_nxv4i64_nxv4f64( %0, %1, %2, - i32 %3) nounwind { + iXLen %3) nounwind { entry: %a = call @llvm.riscv.vfclass.mask.nxv4i64( %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vfclass.nxv8i64( , - i32); + iXLen); define @intrinsic_vfclass_v_nxv8i64_nxv8f64( ; CHECK-LABEL: intrinsic_vfclass_v_nxv8i64_nxv8f64: @@ -656,11 +658,11 @@ define @intrinsic_vfclass_v_nxv8i64_nxv8f64( ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, - i32 %1) nounwind { + iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv8i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -669,7 +671,7 @@ declare @llvm.riscv.vfclass.mask.nxv8i64( , , , - i32); + iXLen); define @intrinsic_vfclass_mask_v_nxv8i64_nxv8f64( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i64_nxv8f64: @@ -680,13 +682,13 @@ define 
<vscale x 8 x i64> @intrinsic_vfclass_mask_v_nxv8i64_nxv8f64(
   <vscale x 8 x i64> %0,
   <vscale x 8 x double> %1,
   <vscale x 8 x i1> %2,
-  i32 %3) nounwind {
+  iXLen %3) nounwind {
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x double> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i64> %a
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll
deleted file mode 100644
index 65270dc06336..000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll
+++ /dev/null
@@ -1,617 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
-  <vscale x 1 x half>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16(
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16(
-  <vscale x 2 x half>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16(
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16(
-  <vscale x 4 x half>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16(
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero,
a0, e16, m2, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32( %0, i64 %1) nounwind { -; CHECK-LABEL: 
intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64( - , - i64); - -define 
@intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64( - , - i64); - -define @intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfcvt.f.x.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll
index 5549960bb773..e8d6257f1e65 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -14,7 +16,7 @@ define <vscale x 1 x half> @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
     <vscale x 1 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x half> %a
 }
@@ -23,10 +25,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
   <vscale x 1 x half>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -37,16 +39,16 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
 
 declare <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16(
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x half> @intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -55,7 +57,7 @@ define <vscale x 2 x half> @intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16(
     <vscale x 2 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x half> %a
 }
@@ -64,10 +66,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16(
   <vscale x 2 x half>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -78,16 +80,16 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
 
 declare <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16(
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x half> @intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -96,7 +98,7 @@ define <vscale x 4 x half> @intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16(
     <vscale x 4 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x half> %a
 }
@@ -105,10 +107,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16(
   <vscale x 4 x half>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2,
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -119,16 +121,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -137,7 +139,7 @@ define @intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16( @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -146,10 +148,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -160,16 +162,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -178,7 +180,7 @@ define @intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16( @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -187,10 +189,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -201,16 +203,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -219,7 +221,7 @@ define @intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16( @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -228,10 +230,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -242,16 +244,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -260,7 +262,7 @@ define @intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32( @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -269,10 +271,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -283,16 +285,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -301,7 +303,7 @@ define @intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32( @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -324,16 +326,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -342,7 +344,7 @@ define @intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32( @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -351,10 +353,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -365,16 +367,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -383,7 +385,7 @@ define @intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32( @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -392,10 +394,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -406,16 +408,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -424,7 +426,7 @@ define @intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32( @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -433,10 +435,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -447,16 +449,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -465,7 +467,7 @@ define @intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64( @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -474,10 +476,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -488,16 +490,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -506,7 +508,7 @@ define @intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64( @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -515,10 +517,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -529,16 +531,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -547,7 +549,7 @@ define @intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64( 
@llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -556,10 +558,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -570,16 +572,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64( , - i32); + iXLen); -define @intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -588,7 +590,7 @@ define @intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64( @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -597,10 +599,10 @@ declare @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -611,7 +613,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll deleted file mode 100644 index 6fc87d15dac1..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll +++ /dev/null @@ -1,617 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16( - , - , - , - i64, - i64); - -define 
@intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32( - %0, - i64 %1) - - ret %a -} - -declare 
@llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64( - , - i64); - -define @intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfcvt.f.xu.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll index 1c8c2a80c90d..82ec8ca7cc74 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -14,7 +16,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16( @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -23,10 +25,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -37,16 +39,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -55,7 +57,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16( @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -64,10 +66,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -78,16 +80,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -96,7 +98,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16( @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -105,10 +107,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -119,16 +121,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -137,7 +139,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16( @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -146,10 +148,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -160,16 +162,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -178,7 +180,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16( @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -187,10 +189,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16( %0, %1, %2, iXLen 
%3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -201,16 +203,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -219,7 +221,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16( @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -228,10 +230,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -242,16 +244,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -260,7 +262,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32( @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -269,10 +271,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -283,16 +285,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -301,7 +303,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32( @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -324,16 +326,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -342,7 +344,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32( @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -351,10 +353,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -365,16 +367,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -383,7 +385,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32( @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -392,10 +394,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -406,16 +408,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -424,7 +426,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32( @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -433,10 +435,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -447,16 +449,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -465,7 +467,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64( @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -474,10 +476,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -488,16 +490,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -506,7 +508,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64( @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -515,10 +517,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -529,16 +531,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -547,7 +549,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64( @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -556,10 +558,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -570,16 +572,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64( , - i32); + iXLen); -define @intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -588,7 +590,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64( @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -597,10 +599,10 @@ declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -611,7 +613,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll deleted file mode 100644 index 75c0a7ff62a4..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll +++ /dev/null @@ -1,617 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py 
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32( - %0, 
- i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32( - , - , - , - i32, - i32); - -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64( - , - i32); - -define @intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64: -; 
CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64(
-    <vscale x 1 x double> %0,
-    i32 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64(<vscale x 1 x i64> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64(
-  <vscale x 2 x double>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64(<vscale x 2 x double> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64(
-    <vscale x 2 x double> %0,
-    i32 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64(<vscale x 2 x i64> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64(
-  <vscale x 4 x double>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64(<vscale x 4 x double> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64(
-    <vscale x 4 x double> %0,
-    i32 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64(<vscale x 4 x i64> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64(
-  <vscale x 8 x double>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64(
-    <vscale x 8 x double> %0,
-    i32 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64(<vscale x 8 x i64> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x i64> %a
-}
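The rename that follows applies the same device to vfcvt-rtz-x-f, this time keeping the rv64 copy, so the scalar type the hunks delete is i64 rather than i32. As a minimal illustration (an excerpt of what the merged file looks like after the patch, not an addition the patch itself makes), the merged declarations are ordinary IR except for the placeholder:

  declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
    <vscale x 1 x half>,
    iXLen);

Note that iXLen is not a real LLVM type: the merged file is only valid IR after the sed substitution, so it cannot be fed to llc directly.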
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll
index 9c3e2a03f114..0dbc0f221e7e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -14,7 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16(<vscale x
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -23,10 +25,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16(
   <vscale x 1 x i16>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -37,16 +39,16 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
 declare <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16(
   <vscale x 2 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16(<vscale x 2 x half> %0, i64 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -55,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16(<vscale x
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16(
     <vscale x 2 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -64,10 +66,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16(
   <vscale x 2 x i16>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16(<vscale x 2 x i16> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16(<vscale x 2 x i16> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -78,16 +80,16 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
 declare <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16(
   <vscale x 4 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16(<vscale x 4 x half> %0, i64 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -96,7 +98,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16(<vscale x
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16(
     <vscale x 4 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -105,10 +107,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16(
   <vscale x 4 x i16>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16>
@intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -119,16 +121,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -137,7 +139,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16( @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -146,10 +148,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -160,16 +162,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -178,7 +180,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16( @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -187,10 +189,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -201,16 +203,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -219,7 +221,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16( @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -228,10 +230,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -242,16 +244,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare 
@llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -260,7 +262,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32( @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -269,10 +271,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -283,16 +285,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -301,7 +303,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32( @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -324,16 +326,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -342,7 +344,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32( @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -351,10 +353,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -365,16 +367,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -383,7 +385,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32( 
@llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -392,10 +394,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -406,16 +408,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -424,7 +426,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32( @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -433,10 +435,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -447,16 +449,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -465,7 +467,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64( @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64( %0, - i64 %1) + iXLen %1) ret %a } @@ -474,10 +476,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -488,16 +490,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -506,7 +508,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64( @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64( %0, - i64 %1) + iXLen %1) ret %a } @@ -515,10 +517,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -529,16 +531,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -547,7 +549,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64( @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64( %0, - i64 %1) + iXLen %1) ret %a } @@ -556,10 +558,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -570,16 +572,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64( , - i64); + iXLen); -define @intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64( %0, i64 %1) nounwind { +define @intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -588,7 +590,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64( @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64( %0, - i64 %1) + iXLen %1) ret %a } @@ -597,10 +599,10 @@ declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -611,7 +613,7 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll deleted file mode 100644 index 3a309eea35db..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll +++ /dev/null @@ -1,617 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a 
= call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16( %0, %1, %2, 
i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - 
%a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64( - , - i64); - -define 
@intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64( - , - i64); - -define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll index 966a5d6f85a0..457a93587ec2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 
-mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -14,7 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16(<vscale x
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -23,10 +25,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16(
   <vscale x 1 x i16>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -37,16 +39,16 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
 declare <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16(
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -55,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16(<vscale x
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16(
     <vscale x 2 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -64,10 +66,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16(
   <vscale x 2 x i16>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16(<vscale x 2 x i16> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16(<vscale x 2 x i16> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -78,16 +80,16 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
 declare <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16(
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -96,7 +98,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16(<vscale x
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16(
     <vscale x 4 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -105,10 +107,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16(
   <vscale x 4 x i16>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16(<vscale x 4 x i16> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16(<vscale x 4 x i16> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -119,16 +121,16 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
 declare <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16(
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2,
ta, mu @@ -137,7 +139,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -146,10 +148,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -160,16 +162,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -178,7 +180,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -187,10 +189,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -201,16 +203,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -219,7 +221,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -228,10 +230,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -242,16 +244,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -260,7 +262,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -269,10 +271,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define 
@intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -283,16 +285,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -301,7 +303,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -324,16 +326,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -342,7 +344,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -351,10 +353,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -365,16 +367,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -383,7 +385,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -392,10 +394,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -406,16 +408,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32( , - i32); + iXLen); -define 
@intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -424,7 +426,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -433,10 +435,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -447,16 +449,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -465,7 +467,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -474,10 +476,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -488,16 +490,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -506,7 +508,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -515,10 +517,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -529,16 +531,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -547,7 +549,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64( %0, - i32 
%1) + iXLen %1) ret %a } @@ -556,10 +558,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -570,16 +572,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64( , - i32); + iXLen); -define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -588,7 +590,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64( @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -597,10 +599,10 @@ declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -611,7 +613,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll deleted file mode 100644 index f5984a512e0f..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll +++ /dev/null @@ -1,617 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16( - , - , - , - i64, - i64); - -define 
@intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32( - , - 
, - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64( - %0, - i64 %1) - - ret %a -} - -declare 
@llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64( - , - i64); - -define @intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64( - , - , - , - i64, - i64); - -define @intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll index 26632717dfa9..88205bb75ce3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16( , - i32); + iXLen); -define @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -14,7 +16,7 @@ define @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16( @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -23,10 +25,10 @@ declare @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -37,16 +39,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16( , - i32); + iXLen); -define @intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16( %0, i32 %1) nounwind { +define @intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -55,7 +57,7 @@ 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll
index 26632717dfa9..88205bb75ce3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -14,7 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -23,10 +25,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16(
   <vscale x 1 x i16>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -37,16 +39,16 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
 declare <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16(
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
[... the identical i32 -> iXLen substitution repeats in every declare, define, and call for nxv2i16 through nxv8i64; the CHECK lines are unchanged ...]
@@ -611,7 +613,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x double> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
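[Note: in the masked tests above, the two trailing iXLen operands are the vector length and the policy immediate; the constant 1 passed by these autogenerated tests selects the tail-agnostic policy, matching the "ta" in the vsetvli CHECK lines. A sketch of the masked intrinsic's operand order as these tests use it; the annotations are editorial, not from the source:

  declare <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16(
    <vscale x 1 x i16>,   ; merge: result for masked-off lanes
    <vscale x 1 x half>,  ; source vector
    <vscale x 1 x i1>,    ; mask, materialized in v0 (hence v0.t in CHECK lines)
    iXLen,                ; vl
    iXLen);               ; policy; 1 = tail agnostic in these tests
]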
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll
deleted file mode 100644
index 8fb44d2d1ecf..000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll
+++ /dev/null
@@ -1,617 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
-    <vscale x 1 x half> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i16> %a
-}
[... the remaining 617-line deletion mirrors the vfcvt-x-f-rv64.ll tests above, with vfcvt.xu.f.v in place of vfcvt.x.f.v, for nxv1i16 through nxv8i64, unmasked and masked ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll
index e76b0db05446..fb8a4797f0ae 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -14,7 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
[... as in vfcvt-x-f.ll, the i32 -> iXLen substitution repeats in every declare, define, and call for nxv1i16 through nxv8i64, unmasked and masked; the CHECK lines are unchanged ...]
@@ -611,7 +613,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x double> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
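[Note: the vfdiv tests that follow also cover the vector-scalar (.vf) forms, whose half/float/double scalar operand must arrive in fa0 for the CHECK lines to match; that is presumably why the merged RUN lines pin -target-abi=ilp32d/lp64d explicitly instead of relying on the default soft-float ABI. A sketch of the .vf pattern from the deleted rv64 file; the @example_vf name and %v/%s/%vl names are illustrative:

  declare <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16(
    <vscale x 1 x half>,
    half,
    i64);

  ; With the lp64d ABI the scalar divisor %s is passed in fa0, which is
  ; exactly the register the autogenerated CHECK lines expect.
  define <vscale x 1 x half> @example_vf(<vscale x 1 x half> %v, half %s, i64 %vl) nounwind {
  ; CHECK-LABEL: example_vf:
  ; CHECK: vfdiv.vf v8, v8, fa0
  entry:
    %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16(
      <vscale x 1 x half> %v,
      half %s,
      i64 %vl)
    ret <vscale x 1 x half> %a
  }
]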
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll
deleted file mode 100644
index 2d4a16e1bf4e..000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll
+++ /dev/null
@@ -1,1355 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfdiv_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
[... vector-vector (vv) pairs, unmasked and masked, continue for nxv1f16 through nxv8f64; at m8 the masked variants first load the second operand with vl8re16.v/vl8re32.v/vl8re64.v v24, (a0) before the masked vfdiv.vv ...]
-declare <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfdiv_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
[... vector-scalar (vf) pairs, unmasked and masked, continue for nxv2f16 through nxv1f64 ...]
-declare <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  double,
- , - i64, - i64); - -define @intrinsic_vfdiv_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv2f64.f64( - , - double, - i64); - -define @intrinsic_vfdiv_vf_nxv2f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv2f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv2f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv2f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv4f64.f64( - , - double, - i64); - -define @intrinsic_vfdiv_vf_nxv4f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv4f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv4f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv4f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfdiv.nxv8f64.f64( - , - double, - i64); - -define @intrinsic_vfdiv_vf_nxv8f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.nxv8f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfdiv.mask.nxv8f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfdiv_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfdiv.mask.nxv8f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll similarity index 86% rename from llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfdiv.ll index 01bfb50ed9b4..0145f2ad764e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by 
utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfdiv.nxv1f16.nxv1f16( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -16,7 +18,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv1f16.nxv1f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -26,10 +28,10 @@ declare @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -41,7 +43,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -49,9 +51,9 @@ entry: declare @llvm.riscv.vfdiv.nxv2f16.nxv2f16( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -61,7 +63,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv2f16.nxv2f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -71,10 +73,10 @@ declare @llvm.riscv.vfdiv.mask.nxv2f16.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -86,7 +88,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -94,9 +96,9 @@ entry: declare @llvm.riscv.vfdiv.nxv4f16.nxv4f16( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -106,7 +108,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv4f16.nxv4f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -116,10 +118,10 @@ declare @llvm.riscv.vfdiv.mask.nxv4f16.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfdiv.nxv8f16.nxv8f16( , , - i32); + iXLen); -define 
@intrinsic_vfdiv_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv8f16.nxv8f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfdiv.mask.nxv8f16.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfdiv.nxv16f16.nxv16f16( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv16f16.nxv16f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfdiv.mask.nxv16f16.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -229,9 +231,9 @@ entry: declare @llvm.riscv.vfdiv.nxv32f16.nxv32f16( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv32f16.nxv32f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -251,10 +253,10 @@ declare @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -267,7 +269,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -275,9 +277,9 @@ entry: declare @llvm.riscv.vfdiv.nxv1f32.nxv1f32( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -287,7 +289,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -297,10 +299,10 @@ declare 
@llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -312,7 +314,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -320,9 +322,9 @@ entry: declare @llvm.riscv.vfdiv.nxv2f32.nxv2f32( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -332,7 +334,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv2f32.nxv2f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -342,10 +344,10 @@ declare @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -357,7 +359,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -365,9 +367,9 @@ entry: declare @llvm.riscv.vfdiv.nxv4f32.nxv4f32( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -377,7 +379,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv4f32.nxv4f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -387,10 +389,10 @@ declare @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -402,7 +404,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -410,9 +412,9 @@ entry: declare @llvm.riscv.vfdiv.nxv8f32.nxv8f32( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -422,7 +424,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv8f32.nxv8f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -432,10 +434,10 @@ declare @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -447,7 +449,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret 
%a } @@ -455,9 +457,9 @@ entry: declare @llvm.riscv.vfdiv.nxv16f32.nxv16f32( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -467,7 +469,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv16f32.nxv16f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -477,10 +479,10 @@ declare @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -493,7 +495,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -501,9 +503,9 @@ entry: declare @llvm.riscv.vfdiv.nxv1f64.nxv1f64( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -513,7 +515,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv1f64.nxv1f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -523,10 +525,10 @@ declare @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -538,7 +540,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -546,9 +548,9 @@ entry: declare @llvm.riscv.vfdiv.nxv2f64.nxv2f64( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -558,7 +560,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv2f64.nxv2f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -568,10 +570,10 @@ declare @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -583,7 +585,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -591,9 +593,9 @@ entry: declare @llvm.riscv.vfdiv.nxv4f64.nxv4f64( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -603,7 +605,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv4f64.nxv4f64( %0, %1, - i32 
%2) + iXLen %2) ret %a } @@ -613,10 +615,10 @@ declare @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -628,7 +630,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -636,9 +638,9 @@ entry: declare @llvm.riscv.vfdiv.nxv8f64.nxv8f64( , , - i32); + iXLen); -define @intrinsic_vfdiv_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -648,7 +650,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv8f64.nxv8f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -658,10 +660,10 @@ declare @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -674,7 +676,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -682,9 +684,9 @@ entry: declare @llvm.riscv.vfdiv.nxv1f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -694,7 +696,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv1f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -704,10 +706,10 @@ declare @llvm.riscv.vfdiv.mask.nxv1f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -719,7 +721,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -727,9 +729,9 @@ entry: declare @llvm.riscv.vfdiv.nxv2f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -739,7 +741,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv2f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -749,10 +751,10 @@ declare @llvm.riscv.vfdiv.mask.nxv2f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -764,7 +766,7 @@ entry: %1, half 
%2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -772,9 +774,9 @@ entry: declare @llvm.riscv.vfdiv.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -784,7 +786,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -794,10 +796,10 @@ declare @llvm.riscv.vfdiv.mask.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -809,7 +811,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -817,9 +819,9 @@ entry: declare @llvm.riscv.vfdiv.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -829,7 +831,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -839,10 +841,10 @@ declare @llvm.riscv.vfdiv.mask.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -854,7 +856,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -862,9 +864,9 @@ entry: declare @llvm.riscv.vfdiv.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -874,7 +876,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -884,10 +886,10 @@ declare @llvm.riscv.vfdiv.mask.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -899,7 +901,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -907,9 +909,9 @@ entry: declare @llvm.riscv.vfdiv.nxv32f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -919,7 +921,7 @@ 
entry: %a = call @llvm.riscv.vfdiv.nxv32f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -929,10 +931,10 @@ declare @llvm.riscv.vfdiv.mask.nxv32f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -944,7 +946,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -952,9 +954,9 @@ entry: declare @llvm.riscv.vfdiv.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -964,7 +966,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -974,10 +976,10 @@ declare @llvm.riscv.vfdiv.mask.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -989,7 +991,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -997,9 +999,9 @@ entry: declare @llvm.riscv.vfdiv.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1009,7 +1011,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1019,10 +1021,10 @@ declare @llvm.riscv.vfdiv.mask.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1034,7 +1036,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1042,9 +1044,9 @@ entry: declare @llvm.riscv.vfdiv.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1054,7 +1056,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1064,10 +1066,10 @@ declare @llvm.riscv.vfdiv.mask.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfdiv_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1079,7 +1081,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1087,9 +1089,9 @@ entry: declare @llvm.riscv.vfdiv.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1099,7 +1101,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1109,10 +1111,10 @@ declare @llvm.riscv.vfdiv.mask.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1124,7 +1126,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1132,9 +1134,9 @@ entry: declare @llvm.riscv.vfdiv.nxv16f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1144,7 +1146,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv16f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1154,10 +1156,10 @@ declare @llvm.riscv.vfdiv.mask.nxv16f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1169,7 +1171,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1177,9 +1179,9 @@ entry: declare @llvm.riscv.vfdiv.nxv1f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1189,7 +1191,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv1f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1199,10 +1201,10 @@ declare @llvm.riscv.vfdiv.mask.nxv1f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1214,7 +1216,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vfdiv.nxv2f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { +define 
@intrinsic_vfdiv_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv2f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1244,10 +1246,10 @@ declare @llvm.riscv.vfdiv.mask.nxv2f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1259,7 +1261,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1267,9 +1269,9 @@ entry: declare @llvm.riscv.vfdiv.nxv4f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1279,7 +1281,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv4f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1289,10 +1291,10 @@ declare @llvm.riscv.vfdiv.mask.nxv4f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1304,7 +1306,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1312,9 +1314,9 @@ entry: declare @llvm.riscv.vfdiv.nxv8f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfdiv_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfdiv_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1324,7 +1326,7 @@ entry: %a = call @llvm.riscv.vfdiv.nxv8f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1334,10 +1336,10 @@ declare @llvm.riscv.vfdiv.mask.nxv8f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfdiv_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfdiv_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1349,7 +1351,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll deleted file mode 100644 index 16d305bad846..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll +++ /dev/null @@ -1,1106 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s -declare @llvm.riscv.vfmacc.nxv1f16.nxv1f16( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { -; 
CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv1f16.nxv1f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv2f16.nxv2f16( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv2f16.nxv2f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv4f16.nxv4f16( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv4f16.nxv4f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv8f16.nxv8f16( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv8f16.nxv8f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - 
i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv16f16.nxv16f16( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv16f16.nxv16f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv1f32.nxv1f32( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv2f32.nxv2f32( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv4f32.nxv4f32( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv8f32.nxv8f32( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv1f64.nxv1f64( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv2f64.nxv2f64( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64( - , - , - , - , - i32); - -define @intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv4f64.nxv4f64( - , - , - , - i32); - -define @intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64( - , - , - , - , - i32); - -define 
@intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmacc.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv1f16.f16( - , - half, - , - i32); - -define @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv1f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv1f16.f16( - , - half, - , - , - i32); - -define @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv1f16.f16( - %0, - half %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv2f16.f16( - , - half, - , - i32); - -define @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv2f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv2f16.f16( - , - half, - , - , - i32); - -define @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv2f16.f16( - %0, - half %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv4f16.f16( - , - half, - , - i32); - -define @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv4f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv4f16.f16( - , - half, - , - , - i32); - -define @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv4f16.f16( - %0, - half %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv8f16.f16( - , - half, - , - i32); - -define @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfmacc.nxv8f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv8f16.f16( - , - half, - , - , - i32); - -define @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv8f16.f16( - %0, - half %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv16f16.f16( - , - half, - , - i32); - -define @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv16f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv16f16.f16( - , - half, - , - , - i32); - -define @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv16f16.f16( - %0, - half %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv1f32.f32( - , - float, - , - i32); - -define @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv1f32.f32( - %0, - float %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv1f32.f32( - , - float, - , - , - i32); - -define @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv1f32.f32( - %0, - float %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv2f32.f32( - , - float, - , - i32); - -define @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv2f32.f32( - %0, - float %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv2f32.f32( - , - float, - , - , - i32); - -define @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv2f32.f32( - %0, - float %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv4f32.f32( - , - float, - , - i32); - -define @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv4f32.f32( - %0, - float %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv4f32.f32( - , - float, - , - , - i32); - -define @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv4f32.f32( - %0, - float %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv8f32.f32( - , - float, - , - i32); - -define @intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv8f32.f32( - %0, - float %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv8f32.f32( - , - float, - , - , - i32); - -define @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv8f32.f32( - %0, - float %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv1f64.f64( - , - double, - , - i32); - -define @intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv1f64.f64( - %0, - double %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv1f64.f64( - , - double, - , - , - i32); - -define @intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv1f64.f64( - %0, - double %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfmacc.nxv2f64.f64( - , - double, - , - i32); - -define @intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.nxv2f64.f64( - %0, - double %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmacc.mask.nxv2f64.f64( - , - double, - , - , - i32); - -define @intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmacc.mask.nxv2f64.f64( - %0, - double %1, - %2, - 
    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    i32 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmacc.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc.ll
similarity index 89%
rename from llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfmacc.ll
index c5809888ff17..5115a7548e2c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x half> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ;
CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfmacc.nxv4f16.nxv4f16( , , , - i64); + iXLen); -define @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16( , , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfmacc.nxv8f16.nxv8f16( , , , - i64); + iXLen); -define @intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16( , , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfmacc.nxv16f16.nxv16f16( , , , - i64); + iXLen); -define @intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16( , , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfmacc.nxv1f32.nxv1f32( , , , - i64); + iXLen); -define @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare 
@llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32( , , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfmacc.nxv2f32.nxv2f32( , , , - i64); + iXLen); -define @intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32( , , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfmacc.nxv4f32.nxv4f32( , , , - i64); + iXLen); -define @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32( , , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfmacc.nxv8f32.nxv8f32( , , , - i64); + iXLen); -define @intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32( , , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfmacc.nxv1f64.nxv1f64( , , , - i64); + iXLen); -define @intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { +define 
@intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64( , , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfmacc.nxv2f64.nxv2f64( , , , - i64); + iXLen); -define @intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64( , , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfmacc.nxv4f64.nxv4f64( , , , - i64); + iXLen); -define @intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64( , , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfmacc.nxv1f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfmacc.mask.nxv1f16.f16( half, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a0, e16, mf4, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfmacc.nxv2f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -616,7 +618,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfmacc.mask.nxv2f16.f16( half, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfmacc.nxv4f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -662,7 +664,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfmacc.mask.nxv4f16.f16( half, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -686,7 +688,7 @@ entry: half %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfmacc.nxv8f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -708,7 +710,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfmacc.mask.nxv8f16.f16( half, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -732,7 +734,7 @@ entry: half %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfmacc.nxv16f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -754,7 +756,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfmacc.mask.nxv16f16.f16( half, , , 
- i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -778,7 +780,7 @@ entry: half %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vfmacc.nxv1f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfmacc.mask.nxv1f32.f32( float, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -833,9 +835,9 @@ declare @llvm.riscv.vfmacc.nxv2f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -846,7 +848,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -856,9 +858,9 @@ declare @llvm.riscv.vfmacc.mask.nxv2f32.f32( float, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -870,7 +872,7 @@ entry: float %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -879,9 +881,9 @@ declare @llvm.riscv.vfmacc.nxv4f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -892,7 +894,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -902,9 +904,9 @@ declare @llvm.riscv.vfmacc.mask.nxv4f32.f32( float, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -916,7 +918,7 @@ entry: float %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -925,9 +927,9 @@ declare @llvm.riscv.vfmacc.nxv8f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { +define 
@intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -938,7 +940,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vfmacc.mask.nxv8f32.f32( float, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -962,7 +964,7 @@ entry: float %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -971,9 +973,9 @@ declare @llvm.riscv.vfmacc.nxv1f64.f64( , double, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -984,7 +986,7 @@ entry: %0, double %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -994,9 +996,9 @@ declare @llvm.riscv.vfmacc.mask.nxv1f64.f64( double, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -1008,7 +1010,7 @@ entry: double %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1017,9 +1019,9 @@ declare @llvm.riscv.vfmacc.nxv2f64.f64( , double, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1030,7 +1032,7 @@ entry: %0, double %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -1040,9 +1042,9 @@ declare @llvm.riscv.vfmacc.mask.nxv2f64.f64( double, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1054,7 +1056,7 @@ entry: double %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1063,9 +1065,9 @@ declare @llvm.riscv.vfmacc.nxv4f64.f64( , double, , - i64); + iXLen); -define @intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { +define @intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1076,7 +1078,7 @@ entry: %0, double %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -1086,9 +1088,9 @@ declare @llvm.riscv.vfmacc.mask.nxv4f64.f64( double, , , - i64); + iXLen); -define @intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1100,7 +1102,7 @@ entry: double %1, %2, %3, - i64 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll deleted file mode 100644 index afd41bd8a212..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll +++ /dev/null @@ -1,1106 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfmadd.nxv1f16.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv1f16.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv2f16.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv2f16.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv4f16.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv4f16.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv8f16.nxv8f16( - , - , - , - i64); - -define 
@intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv8f16.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv16f16.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv16f16.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv1f32.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv2f32.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv4f32.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv8f32.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv1f64.nxv1f64( - , - , - , - i64); - -define @intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv2f64.nxv2f64( - , - , - , - i64); - -define @intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; 
CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv4f64.nxv4f64( - , - , - , - i64); - -define @intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64( - , - , - , - , - i64); - -define @intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmadd.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv1f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv1f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv1f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv1f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv2f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv2f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv2f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv2f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv4f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv4f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - 
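The rv64 file being removed here is line-for-line identical to its rv32 sibling apart from the i64/i32 VL argument, which is exactly the redundancy the sed scheme eliminates. For orientation, a minimal sketch of the merged style follows; the RUN lines are the ones the renamed files in this patch use, but the one-test body (and the @sketch_vfmadd_vv name) is illustrative only, not a file added by the patch:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @sketch_vfmadd_vv(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: sketch_vfmadd_vv:
; CHECK: vfmadd.vv v8, v9, v10
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %3)
  ret <vscale x 1 x half> %a
}

Note that iXLen is not an IR type: sed rewrites it to i32 or i64 before llc ever parses the file. A single set of CHECK lines therefore serves both targets, which works because the generated vector code is identical for either XLen.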
-declare @llvm.riscv.vfmadd.mask.nxv4f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv4f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv8f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv8f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv8f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv8f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv16f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv16f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv16f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv16f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv1f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv1f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv1f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv1f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv2f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv2f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv2f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv2f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv4f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv4f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv4f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv4f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv8f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv8f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv8f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv8f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv1f64.f64( - , - double, - , - i64); - -define @intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.nxv1f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmadd.mask.nxv1f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmadd.mask.nxv1f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmadd.nxv2f64.f64( - , - double, - , - i64); - 
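A side note on why vfmacc and vfmadd get parallel but separate test files: the two instructions differ only in which role the destination register plays, which the CHECK lines above encode implicitly. Informally, per the V-extension spec (a paraphrase for the reader; nothing this patch changes or asserts):

; vfmacc.vf vd, rs1, vs2  computes  vd[i] = f[rs1] * vs2[i] + vd[i]
;   (vd is the addend/accumulator, as in the vfmacc tests earlier)
; vfmadd.vf vd, rs1, vs2  computes  vd[i] = f[rs1] * vd[i] + vs2[i]
;   (vd is the multiplicand, as in the vfmadd tests here)
; Both families share the same intrinsic shape -- dest, scalar, vector, VL --
; so the rv32/rv64 bodies differ only in the VL type, which iXLen abstracts.

In the masked variants, the v0.t suffix together with the tu, mu policy in the vsetvli above leaves inactive and tail elements holding the destination's old values.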
-define <vscale x 2 x double> @intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd.ll
similarity index 89%
rename from llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfmadd.ll
index cfb32cfab4cd..9313e440e500 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-
i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfmadd.nxv2f16.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfmadd.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfmadd.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfmadd.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) 
nounwind { +define @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfmadd.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfmadd.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfmadd.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfmadd.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfmadd.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfmadd.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfmadd.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfmadd.nxv1f16.f16( , half, , - 
i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfmadd.mask.nxv1f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfmadd.nxv2f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -616,7 +618,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfmadd.mask.nxv2f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfmadd.nxv4f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -662,7 +664,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfmadd.mask.nxv4f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -686,7 +688,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfmadd.nxv8f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -708,7 +710,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfmadd.mask.nxv8f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -732,7 +734,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfmadd.nxv16f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -754,7 +756,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfmadd.mask.nxv16f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -778,7 +780,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vfmadd.nxv1f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfmadd.mask.nxv1f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -833,9 +835,9 @@ declare @llvm.riscv.vfmadd.nxv2f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -846,7 +848,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -856,9 +858,9 @@ declare @llvm.riscv.vfmadd.mask.nxv2f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -870,7 +872,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -879,9 +881,9 @@ declare @llvm.riscv.vfmadd.nxv4f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, 
mu @@ -892,7 +894,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -902,9 +904,9 @@ declare @llvm.riscv.vfmadd.mask.nxv4f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -916,7 +918,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -925,9 +927,9 @@ declare @llvm.riscv.vfmadd.nxv8f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -938,7 +940,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vfmadd.mask.nxv8f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -962,7 +964,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -971,9 +973,9 @@ declare @llvm.riscv.vfmadd.nxv1f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -984,7 +986,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -994,9 +996,9 @@ declare @llvm.riscv.vfmadd.mask.nxv1f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -1008,7 +1010,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1017,9 +1019,9 @@ declare @llvm.riscv.vfmadd.nxv2f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1030,7 +1032,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1040,9 +1042,9 @@ declare @llvm.riscv.vfmadd.mask.nxv2f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1054,7 +1056,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1063,9 +1065,9 @@ declare 
@llvm.riscv.vfmadd.nxv4f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1076,7 +1078,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1086,9 +1088,9 @@ declare @llvm.riscv.vfmadd.mask.nxv4f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1100,7 +1102,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll deleted file mode 100644 index 98b4cf71da14..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll +++ /dev/null @@ -1,1355 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s -declare @llvm.riscv.vfmax.nxv1f16.nxv1f16( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv1f16.nxv1f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv2f16.nxv2f16( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv2f16.nxv2f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv4f16.nxv4f16( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v9 
-; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv4f16.nxv4f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv8f16.nxv8f16( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv8f16.nxv8f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmax.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv16f16.nxv16f16( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv16f16.nxv16f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmax.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv32f16.nxv32f16( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv32f16.nxv32f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv1f32.nxv1f32( - , - , - i32); - -define 
@intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv1f32.nxv1f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv2f32.nxv2f32( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv2f32.nxv2f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv4f32.nxv4f32( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv4f32.nxv4f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmax.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv8f32.nxv8f32( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv8f32.nxv8f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmax.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfmax.mask.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv16f32.nxv16f32( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv16f32.nxv16f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv1f64.nxv1f64( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv1f64.nxv1f64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv2f64.nxv2f64( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv2f64.nxv2f64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmax.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv4f64.nxv4f64( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv4f64.nxv4f64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfmax.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv8f64.nxv8f64( - , - , - i32); - -define @intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfmax.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv8f64.nxv8f64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv1f16.f16( - , - half, - i32); - -define @intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv1f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv1f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv2f16.f16( - , - half, - i32); - -define @intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv2f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv2f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv4f16.f16( - , - half, - i32); - -define @intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv4f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare 
@llvm.riscv.vfmax.mask.nxv4f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv8f16.f16( - , - half, - i32); - -define @intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv8f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv8f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv16f16.f16( - , - half, - i32); - -define @intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv16f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv16f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv32f16.f16( - , - half, - i32); - -define @intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv32f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv32f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfmax.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv1f32.f32( - , - float, - i32); - -define @intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; 
CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv1f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv1f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv2f32.f32( - , - float, - i32); - -define @intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv2f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv2f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv4f32.f32( - , - float, - i32); - -define @intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv4f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv4f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv8f32.f32( - , - float, - i32); - -define @intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv8f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv8f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv16f32.f32( - , - float, - i32); - -define @intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) 
nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv16f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv16f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfmax.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv1f64.f64( - , - double, - i32); - -define @intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv1f64.f64( - %0, - double %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv1f64.f64( - , - , - double, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv2f64.f64( - , - double, - i32); - -define @intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv2f64.f64( - %0, - double %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv2f64.f64( - , - , - double, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv2f64.f64( - %0, - %1, - double %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmax.nxv4f64.f64( - , - double, - i32); - -define @intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.nxv4f64.f64( - %0, - double %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmax.mask.nxv4f64.f64( - , - , - double, - , - i32, - i32); - -define @intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmax.mask.nxv4f64.f64( - %0, - %1, - double %2, - 
<vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i32 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfmax.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfmax.ll
index 4fc7319fb2b2..446981928b6c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1>
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -86,7 +88,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -94,9 +96,9 @@ entry: declare @llvm.riscv.vfmax.nxv4f16.nxv4f16( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -106,7 +108,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv4f16.nxv4f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -116,10 +118,10 @@ declare @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfmax.nxv8f16.nxv8f16( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv8f16.nxv8f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfmax.nxv16f16.nxv16f16( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv16f16.nxv16f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -229,9 +231,9 @@ entry: declare @llvm.riscv.vfmax.nxv32f16.nxv32f16( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { +define 
@intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv32f16.nxv32f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -251,10 +253,10 @@ declare @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -267,7 +269,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -275,9 +277,9 @@ entry: declare @llvm.riscv.vfmax.nxv1f32.nxv1f32( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -287,7 +289,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv1f32.nxv1f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -297,10 +299,10 @@ declare @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -312,7 +314,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -320,9 +322,9 @@ entry: declare @llvm.riscv.vfmax.nxv2f32.nxv2f32( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -332,7 +334,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv2f32.nxv2f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -342,10 +344,10 @@ declare @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -357,7 +359,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -365,9 +367,9 @@ entry: declare @llvm.riscv.vfmax.nxv4f32.nxv4f32( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -377,7 +379,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv4f32.nxv4f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -387,10 +389,10 @@ declare @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32( , , , - i64, - i64); + iXLen, + iXLen); -define 
@intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -402,7 +404,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -410,9 +412,9 @@ entry: declare @llvm.riscv.vfmax.nxv8f32.nxv8f32( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -422,7 +424,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv8f32.nxv8f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -432,10 +434,10 @@ declare @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -447,7 +449,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -455,9 +457,9 @@ entry: declare @llvm.riscv.vfmax.nxv16f32.nxv16f32( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -467,7 +469,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv16f32.nxv16f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -477,10 +479,10 @@ declare @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -493,7 +495,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -501,9 +503,9 @@ entry: declare @llvm.riscv.vfmax.nxv1f64.nxv1f64( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -513,7 +515,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv1f64.nxv1f64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -523,10 +525,10 @@ declare @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -538,7 +540,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -546,9 +548,9 @@ entry: declare @llvm.riscv.vfmax.nxv2f64.nxv2f64( , , 
- i64); + iXLen); -define @intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -558,7 +560,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv2f64.nxv2f64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -568,10 +570,10 @@ declare @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -583,7 +585,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -591,9 +593,9 @@ entry: declare @llvm.riscv.vfmax.nxv4f64.nxv4f64( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -603,7 +605,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv4f64.nxv4f64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -613,10 +615,10 @@ declare @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -628,7 +630,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -636,9 +638,9 @@ entry: declare @llvm.riscv.vfmax.nxv8f64.nxv8f64( , , - i64); + iXLen); -define @intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -648,7 +650,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv8f64.nxv8f64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -658,10 +660,10 @@ declare @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -674,7 +676,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -682,9 +684,9 @@ entry: declare @llvm.riscv.vfmax.nxv1f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -694,7 +696,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv1f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -704,10 +706,10 @@ declare @llvm.riscv.vfmax.mask.nxv1f16.f16( , half, , - 
i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -719,7 +721,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -727,9 +729,9 @@ entry: declare @llvm.riscv.vfmax.nxv2f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -739,7 +741,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv2f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -749,10 +751,10 @@ declare @llvm.riscv.vfmax.mask.nxv2f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -764,7 +766,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -772,9 +774,9 @@ entry: declare @llvm.riscv.vfmax.nxv4f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -784,7 +786,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv4f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -794,10 +796,10 @@ declare @llvm.riscv.vfmax.mask.nxv4f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -809,7 +811,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -817,9 +819,9 @@ entry: declare @llvm.riscv.vfmax.nxv8f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -829,7 +831,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv8f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -839,10 +841,10 @@ declare @llvm.riscv.vfmax.mask.nxv8f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -854,7 +856,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -862,9 +864,9 @@ entry: 
declare @llvm.riscv.vfmax.nxv16f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -874,7 +876,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv16f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -884,10 +886,10 @@ declare @llvm.riscv.vfmax.mask.nxv16f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -899,7 +901,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -907,9 +909,9 @@ entry: declare @llvm.riscv.vfmax.nxv32f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -919,7 +921,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv32f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -929,10 +931,10 @@ declare @llvm.riscv.vfmax.mask.nxv32f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -944,7 +946,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -952,9 +954,9 @@ entry: declare @llvm.riscv.vfmax.nxv1f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -964,7 +966,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv1f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -974,10 +976,10 @@ declare @llvm.riscv.vfmax.mask.nxv1f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -989,7 +991,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -997,9 +999,9 @@ entry: declare @llvm.riscv.vfmax.nxv2f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1009,7 +1011,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv2f32.f32( %0, float 
%1, - i64 %2) + iXLen %2) ret %a } @@ -1019,10 +1021,10 @@ declare @llvm.riscv.vfmax.mask.nxv2f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1034,7 +1036,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1042,9 +1044,9 @@ entry: declare @llvm.riscv.vfmax.nxv4f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1054,7 +1056,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv4f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -1064,10 +1066,10 @@ declare @llvm.riscv.vfmax.mask.nxv4f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1079,7 +1081,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1087,9 +1089,9 @@ entry: declare @llvm.riscv.vfmax.nxv8f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1099,7 +1101,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv8f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -1109,10 +1111,10 @@ declare @llvm.riscv.vfmax.mask.nxv8f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1124,7 +1126,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1132,9 +1134,9 @@ entry: declare @llvm.riscv.vfmax.nxv16f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1144,7 +1146,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv16f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -1154,10 +1156,10 @@ declare @llvm.riscv.vfmax.mask.nxv16f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1169,7 +1171,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1177,9 +1179,9 @@ entry: declare @llvm.riscv.vfmax.nxv1f64.f64( , double, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1189,7 +1191,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv1f64.f64( %0, double %1, - i64 %2) + iXLen %2) ret %a } @@ -1199,10 +1201,10 @@ declare @llvm.riscv.vfmax.mask.nxv1f64.f64( , double, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1214,7 +1216,7 @@ entry: %1, double %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vfmax.nxv2f64.f64( , double, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv2f64.f64( %0, double %1, - i64 %2) + iXLen %2) ret %a } @@ -1244,10 +1246,10 @@ declare @llvm.riscv.vfmax.mask.nxv2f64.f64( , double, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1259,7 +1261,7 @@ entry: %1, double %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1267,9 +1269,9 @@ entry: declare @llvm.riscv.vfmax.nxv4f64.f64( , double, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1279,7 +1281,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv4f64.f64( %0, double %1, - i64 %2) + iXLen %2) ret %a } @@ -1289,10 +1291,10 @@ declare @llvm.riscv.vfmax.mask.nxv4f64.f64( , double, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1304,7 +1306,7 @@ entry: %1, double %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1312,9 +1314,9 @@ entry: declare @llvm.riscv.vfmax.nxv8f64.f64( , double, - i64); + iXLen); -define @intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { +define @intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind 
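Throughout these vfmax hunks the masked intrinsics carry two trailing scalar operands, and both change type together: the first is the AVL (the %4 that reaches vsetvli in a0), and the second is a constant policy immediate, where 1 selects the tail-agnostic behaviour that shows up as "ta" in the vsetvli CHECK lines. A minimal sketch of one masked test in its merged form, with the scalable vector types written out as the nxv1f16 name mangling implies; the @vfmax_mask_sketch name, parameter names, and hand-written CHECK lines are illustrative, not taken from the patch:

; iXLen below is rewritten to i32 or i64 by the sed in the RUN lines
; before llc ever parses the file.
declare <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.f16(
  <vscale x 1 x half>,  ; merge (maskedoff) operand
  <vscale x 1 x half>,  ; vector source
  half,                 ; scalar source
  <vscale x 1 x i1>,    ; mask
  iXLen,                ; AVL (vector length)
  iXLen)                ; policy immediate; 1 = tail agnostic

define <vscale x 1 x half> @vfmax_mask_sketch(<vscale x 1 x half> %pt, <vscale x 1 x half> %v, half %s, <vscale x 1 x i1> %m, iXLen %vl) nounwind {
; CHECK-LABEL: vfmax_mask_sketch:
; CHECK:       vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:  vfmax.vf v8, v9, fa0, v0.t
; CHECK-NEXT:  ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.f16(
    <vscale x 1 x half> %pt, <vscale x 1 x half> %v, half %s,
    <vscale x 1 x i1> %m, iXLen %vl, iXLen 1)
  ret <vscale x 1 x half> %a
}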
{ ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1324,7 +1326,7 @@ entry: %a = call @llvm.riscv.vfmax.nxv8f64.f64( %0, double %1, - i64 %2) + iXLen %2) ret %a } @@ -1334,10 +1336,10 @@ declare @llvm.riscv.vfmax.mask.nxv8f64.f64( , double, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +define @intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1349,7 +1351,7 @@ entry: %1, double %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll deleted file mode 100644 index 3dc1240d0f7b..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll +++ /dev/null @@ -1,902 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s -declare @llvm.riscv.vfmerge.nxv1f16.nxv1f16( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv1f16.nxv1f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv1f16.f16( - , - half, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv1f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv2f16.nxv2f16( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv2f16.nxv2f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv2f16.f16( - , - half, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv2f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv4f16.nxv4f16( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv4f16.nxv4f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv4f16.f16( - 
, - half, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv4f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv8f16.nxv8f16( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv8f16.nxv8f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv8f16.f16( - , - half, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv8f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv16f16.nxv16f16( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv16f16.nxv16f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv16f16.f16( - , - half, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv16f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv32f16.nxv32f16( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv32f16.nxv32f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv32f16.f16( - , - half, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16( %0, half %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv32f16.f16( - %0, - half %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv1f32.nxv1f32( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = 
call @llvm.riscv.vfmerge.nxv1f32.nxv1f32( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv1f32.f32( - , - float, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv1f32.f32( - %0, - float %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv2f32.nxv2f32( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv2f32.nxv2f32( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv2f32.f32( - , - float, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv2f32.f32( - %0, - float %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv4f32.nxv4f32( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv4f32.nxv4f32( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv4f32.f32( - , - float, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv4f32.f32( - %0, - float %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv8f32.nxv8f32( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv8f32.nxv8f32( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv8f32.f32( - , - float, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv8f32.f32( - %0, - float %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv16f32.nxv16f32( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv16f32.nxv16f32( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv16f32.f32( - , - float, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32( %0, float %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv16f32.f32( - %0, - float %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv1f64.nxv1f64( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv1f64.nxv1f64( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv1f64.f64( - , - double, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64( %0, double %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv1f64.f64( - %0, - double %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv2f64.nxv2f64( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv2f64.nxv2f64( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv2f64.f64( - , - double, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64( %0, double %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv2f64.f64( - %0, - double %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv4f64.nxv4f64( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv4f64.nxv4f64( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv4f64.f64( - , - double, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64( %0, double %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv4f64.f64( - %0, - double %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv8f64.nxv8f64( - , - , - , - i32); - -define @intrinsic_vfmerge_vvm_nxv8f64_nxv8f64_nxv8f64( 
%0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f64_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv8f64.nxv8f64( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfmerge.nxv8f64.f64( - , - double, - , - i32); - -define @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64( %0, double %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv8f64.f64( - %0, - double %1, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv1f16.f16( - %0, - half zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv2f16.f16( - %0, - half zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv4f16.f16( - %0, - half zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv8f16.f16( - %0, - half zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv16f16.f16( - %0, - half zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv32f16.f16( - %0, - half zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv1f32.f32( - %0, - float zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define 
@intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv2f32.f32( - %0, - float zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv4f32.f32( - %0, - float zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv8f32.f32( - %0, - float zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv16f32.f32( - %0, - float zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv1f64.f64( - %0, - double zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv2f64.f64( - %0, - double zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv4f64.f64( - %0, - double zeroinitializer, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmerge.nxv8f64.f64( - %0, - double zeroinitializer, - %1, - i32 %2) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge.ll similarity index 88% rename from llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll rename to llvm/test/CodeGen/RISCV/rvv/vfmerge.ll index b23d908c7edd..eb3efd1fa037 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmerge.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by 
utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfmerge.nxv1f16.nxv1f16( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -27,9 +29,9 @@ declare @llvm.riscv.vfmerge.nxv1f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -40,7 +42,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -49,9 +51,9 @@ declare @llvm.riscv.vfmerge.nxv2f16.nxv2f16( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -62,7 +64,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -71,9 +73,9 @@ declare @llvm.riscv.vfmerge.nxv2f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -84,7 +86,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -93,9 +95,9 @@ declare @llvm.riscv.vfmerge.nxv4f16.nxv4f16( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -106,7 +108,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -115,9 +117,9 @@ declare @llvm.riscv.vfmerge.nxv4f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -128,7 +130,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -137,9 +139,9 @@ declare @llvm.riscv.vfmerge.nxv8f16.nxv8f16( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16: 
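The pair of RUN lines introduced above is the whole merging trick. iXLen is not a real LLVM IR type, so the checked-in file is intentionally not valid input for llc on its own; each RUN line first pipes the test through sed to specialize iXLen to the target's XLEN-wide integer (i32 or i64), then compiles the result. Because codegen is identical on riscv32 and riscv64 for these intrinsics, one set of CHECK lines, regenerated with utils/update_llc_test_checks.py as the NOTE line says, serves both RUN lines. A stripped-down sketch of a merged test file, with types written out as the nxv1f16 mangling implies and a hypothetical @vfmerge_vfm_sketch function name:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
  <vscale x 1 x half>, half, <vscale x 1 x i1>, iXLen)

define <vscale x 1 x half> @vfmerge_vfm_sketch(<vscale x 1 x half> %v, half %s, <vscale x 1 x i1> %m, iXLen %vl) nounwind {
; CHECK-LABEL: vfmerge_vfm_sketch:
; CHECK:       vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:  vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:  ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
    <vscale x 1 x half> %v, half %s, <vscale x 1 x i1> %m, iXLen %vl)
  ret <vscale x 1 x half> %a
}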
; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -150,7 +152,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -159,9 +161,9 @@ declare @llvm.riscv.vfmerge.nxv8f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -172,7 +174,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -181,9 +183,9 @@ declare @llvm.riscv.vfmerge.nxv16f16.nxv16f16( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -194,7 +196,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -203,9 +205,9 @@ declare @llvm.riscv.vfmerge.nxv16f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -216,7 +218,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -225,9 +227,9 @@ declare @llvm.riscv.vfmerge.nxv32f16.nxv32f16( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -238,7 +240,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -247,9 +249,9 @@ declare @llvm.riscv.vfmerge.nxv32f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -260,7 +262,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -269,9 +271,9 @@ declare @llvm.riscv.vfmerge.nxv1f32.nxv1f32( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -282,7 +284,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -291,9 +293,9 @@ declare @llvm.riscv.vfmerge.nxv1f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -304,7 +306,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -313,9 +315,9 @@ declare 
@llvm.riscv.vfmerge.nxv2f32.nxv2f32( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -326,7 +328,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -335,9 +337,9 @@ declare @llvm.riscv.vfmerge.nxv2f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -348,7 +350,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -357,9 +359,9 @@ declare @llvm.riscv.vfmerge.nxv4f32.nxv4f32( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -370,7 +372,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -379,9 +381,9 @@ declare @llvm.riscv.vfmerge.nxv4f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -392,7 +394,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -401,9 +403,9 @@ declare @llvm.riscv.vfmerge.nxv8f32.nxv8f32( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -414,7 +416,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -423,9 +425,9 @@ declare @llvm.riscv.vfmerge.nxv8f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -436,7 +438,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -445,9 +447,9 @@ declare @llvm.riscv.vfmerge.nxv16f32.nxv16f32( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -458,7 +460,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -467,9 +469,9 @@ declare @llvm.riscv.vfmerge.nxv16f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32( %0, float 
%1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -480,7 +482,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -489,9 +491,9 @@ declare @llvm.riscv.vfmerge.nxv1f64.nxv1f64( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -502,7 +504,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfmerge.nxv1f64.f64( , double, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64( %0, double %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -524,7 +526,7 @@ entry: %0, double %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -533,9 +535,9 @@ declare @llvm.riscv.vfmerge.nxv2f64.nxv2f64( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -546,7 +548,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -555,9 +557,9 @@ declare @llvm.riscv.vfmerge.nxv2f64.f64( , double, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64( %0, double %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -568,7 +570,7 @@ entry: %0, double %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -577,9 +579,9 @@ declare @llvm.riscv.vfmerge.nxv4f64.nxv4f64( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -590,7 +592,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -599,9 +601,9 @@ declare @llvm.riscv.vfmerge.nxv4f64.f64( , double, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64( %0, double %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -612,7 +614,7 @@ entry: %0, double %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -621,9 +623,9 @@ declare @llvm.riscv.vfmerge.nxv8f64.nxv8f64( , , , - i64); + iXLen); -define @intrinsic_vfmerge_vvm_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vvm_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -634,7 +636,7 @@ entry: %0, %1, %2, - i64 
%3) + iXLen %3) ret %a } @@ -643,9 +645,9 @@ declare @llvm.riscv.vfmerge.nxv8f64.f64( , double, , - i64); + iXLen); -define @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64( %0, double %1, %2, i64 %3) nounwind { +define @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -656,12 +658,12 @@ entry: %0, double %1, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -672,12 +674,12 @@ entry: %0, half zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -688,12 +690,12 @@ entry: %0, half zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -704,12 +706,12 @@ entry: %0, half zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -720,12 +722,12 @@ entry: %0, half zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -736,12 +738,12 @@ entry: %0, half zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -752,12 +754,12 @@ entry: %0, half zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -768,12 +770,12 @@ entry: %0, float zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -784,12 +786,12 @@ entry: %0, float 
zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -800,12 +802,12 @@ entry: %0, float zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -816,12 +818,12 @@ entry: %0, float zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -832,12 +834,12 @@ entry: %0, float zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -848,12 +850,12 @@ entry: %0, double zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -864,12 +866,12 @@ entry: %0, double zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -880,12 +882,12 @@ entry: %0, double zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -896,7 +898,7 @@ entry: %0, double zeroinitializer, %1, - i64 %2) + iXLen %2) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll deleted file mode 100644 index 0861a787440e..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll +++ /dev/null @@ -1,1355 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s -declare @llvm.riscv.vfmin.nxv1f16.nxv1f16( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call 
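The vzm tests that close the merged vfmerge.ll above pass an FP zeroinitializer as the scalar operand. Because +0.0 is the all-zero bit pattern in IEEE half, float, and double, the constant never needs an FP register: the merge folds to the integer-immediate form, which is why those CHECK lines expect vmerge.vim v8, v8, 0, v0 in place of vfmerge.vfm v8, v8, fa0, v0. A sketch of one such case, with types again inferred from the mangled intrinsic name and a hypothetical @vfmerge_vzm_sketch function:

declare <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32(
  <vscale x 1 x float>, float, <vscale x 1 x i1>, iXLen)

define <vscale x 1 x float> @vfmerge_vzm_sketch(<vscale x 1 x float> %v, <vscale x 1 x i1> %m, iXLen %vl) nounwind {
; CHECK-LABEL: vfmerge_vzm_sketch:
; +0.0 splats to all-zero bits, so the FP merge lowers to the integer
; immediate form rather than materializing the constant in fa0:
; CHECK:       vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:  vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:  ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32(
    <vscale x 1 x float> %v, float zeroinitializer,
    <vscale x 1 x i1> %m, iXLen %vl)
  ret <vscale x 1 x float> %a
}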
@llvm.riscv.vfmin.nxv1f16.nxv1f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv2f16.nxv2f16( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv2f16.nxv2f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv4f16.nxv4f16( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv4f16.nxv4f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv8f16.nxv8f16( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv8f16.nxv8f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmin.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv16f16.nxv16f16( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv16f16.nxv16f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmin.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv32f16.nxv32f16( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv32f16.nxv32f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv1f32.nxv1f32( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv1f32.nxv1f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv2f32.nxv2f32( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv2f32.nxv2f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - 
-declare @llvm.riscv.vfmin.nxv4f32.nxv4f32( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv4f32.nxv4f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmin.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv8f32.nxv8f32( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv8f32.nxv8f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmin.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv16f32.nxv16f32( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv16f32.nxv16f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv1f64.nxv1f64( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv1f64.nxv1f64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e64, m1, ta, mu -; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv2f64.nxv2f64( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv2f64.nxv2f64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmin.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv4f64.nxv4f64( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv4f64.nxv4f64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfmin.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv8f64.nxv8f64( - , - , - i32); - -define @intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfmin.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv8f64.nxv8f64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64( - , - , - , - , - i32, - i32); - -define @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv1f16.f16( - , - half, - i32); - -define @intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv1f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv1f16.f16( - , - , - half, - , - i32, - i32); - -define 
@intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv2f16.f16( - , - half, - i32); - -define @intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv2f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv2f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv4f16.f16( - , - half, - i32); - -define @intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv4f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv4f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv8f16.f16( - , - half, - i32); - -define @intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv8f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv8f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv16f16.f16( - , - half, - i32); - -define @intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv16f16.f16( - %0, 
- half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv16f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv32f16.f16( - , - half, - i32); - -define @intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv32f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv32f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfmin.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv1f32.f32( - , - float, - i32); - -define @intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv1f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv1f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv2f32.f32( - , - float, - i32); - -define @intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv2f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv2f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv4f32.f32( - , - float, - i32); - -define @intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv4f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv4f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv8f32.f32( - , - float, - i32); - -define @intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv8f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv8f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv16f32.f32( - , - float, - i32); - -define @intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv16f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv16f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfmin.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv1f64.f64( - , - double, - i32); - -define @intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.nxv1f64.f64( - %0, - double %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfmin.mask.nxv1f64.f64( - , - , - double, - , - i32, - i32); - -define @intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmin.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfmin.nxv2f64.f64( - , - double, - i32); - 
-define <vscale x 2 x double> @intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    i32 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    double %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    i32 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    double %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i32 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfmin.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfmin.ll
index e647fe51ffb1..e151e9fb695d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -116,10 +118,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -131,7 +133,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -139,9 +141,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -151,7 +153,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -161,10 +163,10 @@
declare @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfmin.nxv16f16.nxv16f16( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv16f16.nxv16f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -229,9 +231,9 @@ entry: declare @llvm.riscv.vfmin.nxv32f16.nxv32f16( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv32f16.nxv32f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -251,10 +253,10 @@ declare @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -267,7 +269,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -275,9 +277,9 @@ entry: declare @llvm.riscv.vfmin.nxv1f32.nxv1f32( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -287,7 +289,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv1f32.nxv1f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -297,10 +299,10 @@ declare @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -312,7 +314,7 @@ entry: %1, %2, 
%3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -320,9 +322,9 @@ entry: declare @llvm.riscv.vfmin.nxv2f32.nxv2f32( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -332,7 +334,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv2f32.nxv2f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -342,10 +344,10 @@ declare @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -357,7 +359,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -365,9 +367,9 @@ entry: declare @llvm.riscv.vfmin.nxv4f32.nxv4f32( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -377,7 +379,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv4f32.nxv4f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -387,10 +389,10 @@ declare @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -402,7 +404,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -410,9 +412,9 @@ entry: declare @llvm.riscv.vfmin.nxv8f32.nxv8f32( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -422,7 +424,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv8f32.nxv8f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -432,10 +434,10 @@ declare @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -447,7 +449,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -455,9 +457,9 @@ entry: declare @llvm.riscv.vfmin.nxv16f32.nxv16f32( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -467,7 +469,7 @@ entry: %a = call 
@llvm.riscv.vfmin.nxv16f32.nxv16f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -477,10 +479,10 @@ declare @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -493,7 +495,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -501,9 +503,9 @@ entry: declare @llvm.riscv.vfmin.nxv1f64.nxv1f64( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -513,7 +515,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv1f64.nxv1f64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -523,10 +525,10 @@ declare @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -538,7 +540,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -546,9 +548,9 @@ entry: declare @llvm.riscv.vfmin.nxv2f64.nxv2f64( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -558,7 +560,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv2f64.nxv2f64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -568,10 +570,10 @@ declare @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -583,7 +585,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -591,9 +593,9 @@ entry: declare @llvm.riscv.vfmin.nxv4f64.nxv4f64( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -603,7 +605,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv4f64.nxv4f64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -613,10 +615,10 @@ declare @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a0, e64, m4, ta, mu @@ -628,7 +630,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -636,9 +638,9 @@ entry: declare @llvm.riscv.vfmin.nxv8f64.nxv8f64( , , - i64); + iXLen); -define @intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { +define @intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -648,7 +650,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv8f64.nxv8f64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -658,10 +660,10 @@ declare @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -674,7 +676,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -682,9 +684,9 @@ entry: declare @llvm.riscv.vfmin.nxv1f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -694,7 +696,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv1f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -704,10 +706,10 @@ declare @llvm.riscv.vfmin.mask.nxv1f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -719,7 +721,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -727,9 +729,9 @@ entry: declare @llvm.riscv.vfmin.nxv2f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -739,7 +741,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv2f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -749,10 +751,10 @@ declare @llvm.riscv.vfmin.mask.nxv2f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -764,7 +766,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -772,9 +774,9 @@ entry: declare @llvm.riscv.vfmin.nxv4f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, 
mu @@ -784,7 +786,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv4f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -794,10 +796,10 @@ declare @llvm.riscv.vfmin.mask.nxv4f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -809,7 +811,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -817,9 +819,9 @@ entry: declare @llvm.riscv.vfmin.nxv8f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -829,7 +831,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv8f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -839,10 +841,10 @@ declare @llvm.riscv.vfmin.mask.nxv8f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -854,7 +856,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -862,9 +864,9 @@ entry: declare @llvm.riscv.vfmin.nxv16f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -874,7 +876,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv16f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -884,10 +886,10 @@ declare @llvm.riscv.vfmin.mask.nxv16f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -899,7 +901,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -907,9 +909,9 @@ entry: declare @llvm.riscv.vfmin.nxv32f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -919,7 +921,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv32f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -929,10 +931,10 @@ declare @llvm.riscv.vfmin.mask.nxv32f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -944,7 +946,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -952,9 +954,9 @@ entry: declare @llvm.riscv.vfmin.nxv1f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -964,7 +966,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv1f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -974,10 +976,10 @@ declare @llvm.riscv.vfmin.mask.nxv1f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -989,7 +991,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -997,9 +999,9 @@ entry: declare @llvm.riscv.vfmin.nxv2f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1009,7 +1011,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv2f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -1019,10 +1021,10 @@ declare @llvm.riscv.vfmin.mask.nxv2f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1034,7 +1036,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1042,9 +1044,9 @@ entry: declare @llvm.riscv.vfmin.nxv4f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1054,7 +1056,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv4f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -1064,10 +1066,10 @@ declare @llvm.riscv.vfmin.mask.nxv4f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1079,7 +1081,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1087,9 +1089,9 @@ entry: declare @llvm.riscv.vfmin.nxv8f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32( %0, 
float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1099,7 +1101,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv8f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -1109,10 +1111,10 @@ declare @llvm.riscv.vfmin.mask.nxv8f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1124,7 +1126,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1132,9 +1134,9 @@ entry: declare @llvm.riscv.vfmin.nxv16f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1144,7 +1146,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv16f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -1154,10 +1156,10 @@ declare @llvm.riscv.vfmin.mask.nxv16f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1169,7 +1171,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1177,9 +1179,9 @@ entry: declare @llvm.riscv.vfmin.nxv1f64.f64( , double, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1189,7 +1191,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv1f64.f64( %0, double %1, - i64 %2) + iXLen %2) ret %a } @@ -1199,10 +1201,10 @@ declare @llvm.riscv.vfmin.mask.nxv1f64.f64( , double, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1214,7 +1216,7 @@ entry: %1, double %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vfmin.nxv2f64.f64( , double, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv2f64.f64( %0, double %1, - i64 %2) + iXLen %2) ret %a } @@ -1244,10 +1246,10 @@ declare @llvm.riscv.vfmin.mask.nxv2f64.f64( , double, , - i64, - i64); + iXLen, + iXLen); -define 
@intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1259,7 +1261,7 @@ entry: %1, double %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1267,9 +1269,9 @@ entry: declare @llvm.riscv.vfmin.nxv4f64.f64( , double, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1279,7 +1281,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv4f64.f64( %0, double %1, - i64 %2) + iXLen %2) ret %a } @@ -1289,10 +1291,10 @@ declare @llvm.riscv.vfmin.mask.nxv4f64.f64( , double, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1304,7 +1306,7 @@ entry: %1, double %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1312,9 +1314,9 @@ entry: declare @llvm.riscv.vfmin.nxv8f64.f64( , double, - i64); + iXLen); -define @intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { +define @intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1324,7 +1326,7 @@ entry: %a = call @llvm.riscv.vfmin.nxv8f64.f64( %0, double %1, - i64 %2) + iXLen %2) ret %a } @@ -1334,10 +1336,10 @@ declare @llvm.riscv.vfmin.mask.nxv8f64.f64( , double, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +define @intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1349,7 +1351,7 @@ entry: %1, double %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll deleted file mode 100644 index 2a5fb2896aa5..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll +++ /dev/null @@ -1,1106 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfmsac.nxv1f16.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv1f16.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) 
nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv2f16.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv2f16.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv4f16.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv4f16.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv8f16.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv8f16.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv16f16.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv16f16.nxv16f16( - %0, - %1, - 
%2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv1f32.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv2f32.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv4f32.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv8f32.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv1f64.nxv1f64( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv2f64.nxv2f64( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv4f64.nxv4f64( - , - , - , - i64); - -define @intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64( - , - , - , - , - i64); - -define @intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmsac.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv1f16.f16( - , 
- half, - , - i64); - -define @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv1f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv1f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv1f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv2f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv2f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv2f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv2f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv4f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv4f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv4f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv4f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv8f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv8f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv8f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v10, v0.t -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv8f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv16f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv16f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv16f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv16f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv1f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv1f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv1f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv1f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv2f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv2f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv2f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv2f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv4f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv4f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv4f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) 
nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv4f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv8f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv8f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv8f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv8f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv1f64.f64( - , - double, - , - i64); - -define @intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv1f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv1f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv1f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv2f64.f64( - , - double, - , - i64); - -define @intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv2f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsac.mask.nxv2f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.mask.nxv2f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsac.nxv4f64.f64( - , - double, - , - i64); - -define @intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsac.nxv4f64.f64( - %0, - 
double %1,
-    <vscale x 4 x double> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfmsac.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac.ll
similarity index 89%
rename from llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfmsac.ll
index c8407dfe6473..cf9df7550fcc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 
 declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x half> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x half> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL:
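The sed RUN lines in the first hunk above are the heart of the rename: the test body is now written once against the placeholder type iXLen, and each RUN line textually specializes it to i32 or i64 before llc ever parses it, so a single set of autogenerated CHECK lines covers both riscv32 and riscv64. Condensed, the merged vfmsac.ll opens as below; the <vscale x 1 x half> spellings are reconstructed from the nxv1f16 intrinsic name, since the angle-bracket types do not survive in this excerpt.

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}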
intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfmsac.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfmsac.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfmsac.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - 
i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfmsac.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfmsac.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfmsac.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfmsac.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { 
+define @intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfmsac.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfmsac.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfmsac.nxv1f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfmsac.mask.nxv1f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfmsac.nxv2f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a0, e16, mf2, tu, mu @@ -616,7 +618,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfmsac.mask.nxv2f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfmsac.nxv4f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -662,7 +664,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfmsac.mask.nxv4f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -686,7 +688,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfmsac.nxv8f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -708,7 +710,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfmsac.mask.nxv8f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -732,7 +734,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfmsac.nxv16f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -754,7 +756,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfmsac.mask.nxv16f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -778,7 +780,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare 
@llvm.riscv.vfmsac.nxv1f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfmsac.mask.nxv1f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -833,9 +835,9 @@ declare @llvm.riscv.vfmsac.nxv2f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -846,7 +848,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -856,9 +858,9 @@ declare @llvm.riscv.vfmsac.mask.nxv2f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -870,7 +872,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -879,9 +881,9 @@ declare @llvm.riscv.vfmsac.nxv4f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -892,7 +894,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -902,9 +904,9 @@ declare @llvm.riscv.vfmsac.mask.nxv4f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -916,7 +918,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -925,9 +927,9 @@ declare @llvm.riscv.vfmsac.nxv8f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -938,7 +940,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vfmsac.mask.nxv8f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { 
+define @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -962,7 +964,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -971,9 +973,9 @@ declare @llvm.riscv.vfmsac.nxv1f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -984,7 +986,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -994,9 +996,9 @@ declare @llvm.riscv.vfmsac.mask.nxv1f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -1008,7 +1010,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1017,9 +1019,9 @@ declare @llvm.riscv.vfmsac.nxv2f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1030,7 +1032,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1040,9 +1042,9 @@ declare @llvm.riscv.vfmsac.mask.nxv2f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1054,7 +1056,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1063,9 +1065,9 @@ declare @llvm.riscv.vfmsac.nxv4f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1076,7 +1078,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1086,9 +1088,9 @@ declare @llvm.riscv.vfmsac.mask.nxv4f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1100,7 +1102,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll deleted file mode 100644 index 70efc0da21f5..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll +++ /dev/null @@ -1,1106 +0,0 @@ -; NOTE: Assertions have been autogenerated 
by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfmsub.nxv1f16.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv1f16.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv2f16.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv2f16.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv4f16.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv4f16.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv8f16.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv8f16.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 
%4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv16f16.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv16f16.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv1f32.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv2f32.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv2f32.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv4f32.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32( 
- %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv8f32.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv1f64.nxv1f64( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv2f64.nxv2f64( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv4f64.nxv4f64( - , - , - , - i64); - -define @intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64( - , - , - , - , - i64); - -define @intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmsub.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv1f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv1f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv1f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv1f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv2f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv2f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv2f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv2f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv4f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv4f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv4f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv4f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv8f16.f16( - , - half, - 
, - i64); - -define @intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv8f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv8f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv8f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv16f16.f16( - , - half, - , - i64); - -define @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv16f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv16f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv16f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv1f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv1f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv1f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv1f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv2f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv2f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv2f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t 
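The masked variants deleted in this stretch follow their unmasked siblings exactly, plus one extra operand: a <vscale x N x i1> mask that the backend binds to v0. That is why the expected assembly carries the trailing v0.t, and why the vsetvli uses tu, mu (tail and masked-off elements leave the destination undisturbed, so the accumulator in v8 is merged rather than overwritten). In the merged vfmsub.ll this test survives once, in iXLen form, roughly as follows; a sketch with the vector types spelled out from the intrinsic name, not the verbatim checked-in file.

declare <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  float,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfmsub.vf v8, fa0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    float %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x float> %a
}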
-; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv2f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv4f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv4f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv4f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv4f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv8f32.f32( - , - float, - , - i64); - -define @intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv8f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv8f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv8f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv1f64.f64( - , - double, - , - i64); - -define @intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv1f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv1f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv1f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv2f64.f64( - , - double, - , - i64); - -define @intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv2f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv2f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 
%4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv2f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfmsub.nxv4f64.f64( - , - double, - , - i64); - -define @intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.nxv4f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfmsub.mask.nxv4f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmsub.mask.nxv4f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub.ll similarity index 89% rename from llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfmsub.ll index 620c3dcb1025..d071893ceb08 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfmsub.nxv1f16.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfmsub.nxv2f16.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare 
@llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfmsub.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfmsub.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfmsub.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfmsub.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define 
@intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfmsub.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfmsub.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfmsub.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfmsub.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfmsub.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfmsub.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfmsub.nxv1f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfmsub.mask.nxv1f16.f16( half, , , - i32); + iXLen); -define 
@intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfmsub.nxv2f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -616,7 +618,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfmsub.mask.nxv2f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfmsub.nxv4f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -662,7 +664,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfmsub.mask.nxv4f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -686,7 +688,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfmsub.nxv8f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -708,7 +710,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfmsub.mask.nxv8f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -732,7 +734,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfmsub.nxv16f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind 
{ ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -754,7 +756,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfmsub.mask.nxv16f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -778,7 +780,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vfmsub.nxv1f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfmsub.mask.nxv1f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -833,9 +835,9 @@ declare @llvm.riscv.vfmsub.nxv2f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -846,7 +848,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -856,9 +858,9 @@ declare @llvm.riscv.vfmsub.mask.nxv2f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -870,7 +872,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -879,9 +881,9 @@ declare @llvm.riscv.vfmsub.nxv4f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -892,7 +894,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -902,9 +904,9 @@ declare @llvm.riscv.vfmsub.mask.nxv4f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu 
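; The hunks above and below all apply one mechanical rewrite, shown in the
; RUN lines at the top of this file's diff: the test is written once against
; the placeholder type iXLen, and each RUN line uses sed to specialize it to
; i32 or i64 before llc parses the file. A minimal sketch of the resulting
; test shape (the @example wrapper and its value names are illustrative, not
; part of this patch; the declaration matches the one in this file):
;
;   ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
;   ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
;   ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
;   ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
;
;   declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
;     <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, iXLen);
;
;   define <vscale x 1 x half> @example(<vscale x 1 x half> %vd,
;       <vscale x 1 x half> %vs1, <vscale x 1 x half> %vs2, iXLen %vl) {
;     %r = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
;         <vscale x 1 x half> %vd, <vscale x 1 x half> %vs1,
;         <vscale x 1 x half> %vs2, iXLen %vl)
;     ret <vscale x 1 x half> %r
;   }
;
; Because the vector codegen is identical on rv32 and rv64 and only the GPR
; width behind the vl operand differs, a single set of autogenerated CHECK
; lines serves both RUN lines.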
@@ -916,7 +918,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -925,9 +927,9 @@ declare @llvm.riscv.vfmsub.nxv8f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -938,7 +940,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vfmsub.mask.nxv8f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -962,7 +964,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -971,9 +973,9 @@ declare @llvm.riscv.vfmsub.nxv1f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -984,7 +986,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -994,9 +996,9 @@ declare @llvm.riscv.vfmsub.mask.nxv1f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -1008,7 +1010,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1017,9 +1019,9 @@ declare @llvm.riscv.vfmsub.nxv2f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1030,7 +1032,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1040,9 +1042,9 @@ declare @llvm.riscv.vfmsub.mask.nxv2f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1054,7 +1056,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1063,9 +1065,9 @@ declare @llvm.riscv.vfmsub.nxv4f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1076,7 +1078,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1086,9 +1088,9 @@ declare 
@llvm.riscv.vfmsub.mask.nxv4f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1100,7 +1102,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll deleted file mode 100644 index 08aa64b6de7f..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll +++ /dev/null @@ -1,1355 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfmul.nxv1f16( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv1f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv1f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv2f16( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv2f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv2f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv4f16( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv4f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv4f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv8f16( - , - , - i64); - -define 
@intrinsic_vfmul_vv_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv8f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv8f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmul.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv16f16( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv16f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv16f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmul.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv32f16( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv32f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv32f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv1f32( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv1f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv1f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv2f32( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv2f32_nxv2f32( %0, %1, i64 %2) 
nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv2f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv2f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv4f32( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv4f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv4f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmul.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv8f32( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv8f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv8f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmul.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv16f32( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv16f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv16f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv1f64( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f64_nxv1f64: -; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv1f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv1f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv2f64( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv2f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv2f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmul.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv4f64( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv4f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv4f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfmul.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv8f64( - , - , - i64); - -define @intrinsic_vfmul_vv_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfmul.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv8f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv8f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv8f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv1f16.f16( - , - half, - i64); - -define @intrinsic_vfmul_vf_nxv1f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; 
CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv1f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv1f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv2f16.f16( - , - half, - i64); - -define @intrinsic_vfmul_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv2f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv2f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv4f16.f16( - , - half, - i64); - -define @intrinsic_vfmul_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv4f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv4f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfmul_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv8f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfmul_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; 
CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv16f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv32f16.f16( - , - half, - i64); - -define @intrinsic_vfmul_vf_nxv32f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv32f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv32f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfmul.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv1f32.f32( - , - float, - i64); - -define @intrinsic_vfmul_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfmul_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfmul_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a0, e32, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv4f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv4f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv8f32.f32( - , - float, - i64); - -define @intrinsic_vfmul_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv8f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv8f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv16f32.f32( - , - float, - i64); - -define @intrinsic_vfmul_vf_nxv16f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv16f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv16f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfmul.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv1f64.f64( - , - double, - i64); - -define @intrinsic_vfmul_vf_nxv1f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv1f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv1f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv2f64.f64( - , - double, - i64); - -define @intrinsic_vfmul_vf_nxv2f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f64_f64: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv2f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv2f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv2f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv4f64.f64( - , - double, - i64); - -define @intrinsic_vfmul_vf_nxv4f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv4f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv4f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv4f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfmul.nxv8f64.f64( - , - double, - i64); - -define @intrinsic_vfmul_vf_nxv8f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.nxv8f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfmul.mask.nxv8f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfmul_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfmul.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmul.mask.nxv8f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul.ll similarity index 86% rename from llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfmul.ll index 50ebccd92e64..0f4c73802561 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfmul.nxv1f16.nxv1f16( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -16,7 +18,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv1f16.nxv1f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -26,10 +28,10 @@ declare @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -41,7 +43,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -49,9 +51,9 @@ entry: declare @llvm.riscv.vfmul.nxv2f16.nxv2f16( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -61,7 +63,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv2f16.nxv2f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -71,10 +73,10 @@ declare @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -86,7 +88,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -94,9 +96,9 @@ entry: declare @llvm.riscv.vfmul.nxv4f16.nxv4f16( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -106,7 +108,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv4f16.nxv4f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -116,10 +118,10 @@ declare @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfmul.nxv8f16.nxv8f16( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv8f16.nxv8f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define 
@intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfmul.nxv16f16.nxv16f16( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv16f16.nxv16f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -229,9 +231,9 @@ entry: declare @llvm.riscv.vfmul.nxv32f16.nxv32f16( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv32f16.nxv32f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -251,10 +253,10 @@ declare @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -267,7 +269,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -275,9 +277,9 @@ entry: declare @llvm.riscv.vfmul.nxv1f32.nxv1f32( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -287,7 +289,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv1f32.nxv1f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -297,10 +299,10 @@ declare @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -312,7 +314,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -320,9 +322,9 @@ entry: declare @llvm.riscv.vfmul.nxv2f32.nxv2f32( , , - i32); + iXLen); -define 
@intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -332,7 +334,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv2f32.nxv2f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -342,10 +344,10 @@ declare @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -357,7 +359,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -365,9 +367,9 @@ entry: declare @llvm.riscv.vfmul.nxv4f32.nxv4f32( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -377,7 +379,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv4f32.nxv4f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -387,10 +389,10 @@ declare @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -402,7 +404,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -410,9 +412,9 @@ entry: declare @llvm.riscv.vfmul.nxv8f32.nxv8f32( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -422,7 +424,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv8f32.nxv8f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -432,10 +434,10 @@ declare @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -447,7 +449,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -455,9 +457,9 @@ entry: declare @llvm.riscv.vfmul.nxv16f32.nxv16f32( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -467,7 +469,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv16f32.nxv16f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -477,10 +479,10 @@ declare @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32( , , 
, - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -493,7 +495,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -501,9 +503,9 @@ entry: declare @llvm.riscv.vfmul.nxv1f64.nxv1f64( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -513,7 +515,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv1f64.nxv1f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -523,10 +525,10 @@ declare @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -538,7 +540,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -546,9 +548,9 @@ entry: declare @llvm.riscv.vfmul.nxv2f64.nxv2f64( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -558,7 +560,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv2f64.nxv2f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -568,10 +570,10 @@ declare @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -583,7 +585,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -591,9 +593,9 @@ entry: declare @llvm.riscv.vfmul.nxv4f64.nxv4f64( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -603,7 +605,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv4f64.nxv4f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -613,10 +615,10 @@ declare @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -628,7 +630,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -636,9 +638,9 @@ entry: declare 
@llvm.riscv.vfmul.nxv8f64.nxv8f64( , , - i32); + iXLen); -define @intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -648,7 +650,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv8f64.nxv8f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -658,10 +660,10 @@ declare @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -674,7 +676,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -682,9 +684,9 @@ entry: declare @llvm.riscv.vfmul.nxv1f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -694,7 +696,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv1f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -704,10 +706,10 @@ declare @llvm.riscv.vfmul.mask.nxv1f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -719,7 +721,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -727,9 +729,9 @@ entry: declare @llvm.riscv.vfmul.nxv2f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -739,7 +741,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv2f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -749,10 +751,10 @@ declare @llvm.riscv.vfmul.mask.nxv2f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -764,7 +766,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -772,9 +774,9 @@ entry: declare @llvm.riscv.vfmul.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -784,7 +786,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -794,10 +796,10 @@ declare 
@llvm.riscv.vfmul.mask.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -809,7 +811,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -817,9 +819,9 @@ entry: declare @llvm.riscv.vfmul.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -829,7 +831,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -839,10 +841,10 @@ declare @llvm.riscv.vfmul.mask.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -854,7 +856,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -862,9 +864,9 @@ entry: declare @llvm.riscv.vfmul.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -874,7 +876,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -884,10 +886,10 @@ declare @llvm.riscv.vfmul.mask.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -899,7 +901,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -907,9 +909,9 @@ entry: declare @llvm.riscv.vfmul.nxv32f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -919,7 +921,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv32f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -929,10 +931,10 @@ declare @llvm.riscv.vfmul.mask.nxv32f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -944,7 +946,7 @@ entry: %1, half %2, %3, - 
i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -952,9 +954,9 @@ entry: declare @llvm.riscv.vfmul.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -964,7 +966,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -974,10 +976,10 @@ declare @llvm.riscv.vfmul.mask.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -989,7 +991,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -997,9 +999,9 @@ entry: declare @llvm.riscv.vfmul.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1009,7 +1011,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1019,10 +1021,10 @@ declare @llvm.riscv.vfmul.mask.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1034,7 +1036,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1042,9 +1044,9 @@ entry: declare @llvm.riscv.vfmul.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1054,7 +1056,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1064,10 +1066,10 @@ declare @llvm.riscv.vfmul.mask.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1079,7 +1081,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1087,9 +1089,9 @@ entry: declare @llvm.riscv.vfmul.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ 
-1099,7 +1101,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1109,10 +1111,10 @@ declare @llvm.riscv.vfmul.mask.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1124,7 +1126,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1132,9 +1134,9 @@ entry: declare @llvm.riscv.vfmul.nxv16f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1144,7 +1146,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv16f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1154,10 +1156,10 @@ declare @llvm.riscv.vfmul.mask.nxv16f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1169,7 +1171,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1177,9 +1179,9 @@ entry: declare @llvm.riscv.vfmul.nxv1f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1189,7 +1191,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv1f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1199,10 +1201,10 @@ declare @llvm.riscv.vfmul.mask.nxv1f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1214,7 +1216,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vfmul.nxv2f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv2f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1244,10 +1246,10 @@ declare @llvm.riscv.vfmul.mask.nxv2f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen 
%4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1259,7 +1261,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1267,9 +1269,9 @@ entry: declare @llvm.riscv.vfmul.nxv4f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1279,7 +1281,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv4f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1289,10 +1291,10 @@ declare @llvm.riscv.vfmul.mask.nxv4f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1304,7 +1306,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1312,9 +1314,9 @@ entry: declare @llvm.riscv.vfmul.nxv8f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1324,7 +1326,7 @@ entry: %a = call @llvm.riscv.vfmul.nxv8f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1334,10 +1336,10 @@ declare @llvm.riscv.vfmul.mask.nxv8f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1349,7 +1351,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll deleted file mode 100644 index ee619309b9e3..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll +++ /dev/null @@ -1,197 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+d,+v,+zfh -target-abi lp64d -verify-machineinstrs < %s | FileCheck %s - -declare @llvm.riscv.vfmv.s.f.nxv1f16(, half, i64) - -define @intrinsic_vfmv.s.f_f_nxv1f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv1f16( %0, half %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv2f16(, half, i64) - -define @intrinsic_vfmv.s.f_f_nxv2f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv2f16( %0, half %1, i64 %2) - ret %a -} - -declare 
@llvm.riscv.vfmv.s.f.nxv4f16(, half, i64) - -define @intrinsic_vfmv.s.f_f_nxv4f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv4f16( %0, half %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv8f16(, half, i64) - -define @intrinsic_vfmv.s.f_f_nxv8f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv8f16( %0, half %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv16f16(, half, i64) - -define @intrinsic_vfmv.s.f_f_nxv16f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv16f16( %0, half %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv32f16(, half, i64) - -define @intrinsic_vfmv.s.f_f_nxv32f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv32f16( %0, half %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv1f32(, float, i64) - -define @intrinsic_vfmv.s.f_f_nxv1f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv1f32( %0, float %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv2f32(, float, i64) - -define @intrinsic_vfmv.s.f_f_nxv2f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv2f32( %0, float %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv4f32(, float, i64) - -define @intrinsic_vfmv.s.f_f_nxv4f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv4f32( %0, float %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv8f32(, float, i64) - -define @intrinsic_vfmv.s.f_f_nxv8f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv8f32( %0, float %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv16f32(, float, i64) - -define @intrinsic_vfmv.s.f_f_nxv16f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv16f32( %0, float %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv1f64(, double, i64) - -define 
@intrinsic_vfmv.s.f_f_nxv1f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv1f64( %0, double %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv2f64(, double, i64) - -define @intrinsic_vfmv.s.f_f_nxv2f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv2f64( %0, double %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv4f64(, double, i64) - -define @intrinsic_vfmv.s.f_f_nxv4f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv4f64( %0, double %1, i64 %2) - ret %a -} - -declare @llvm.riscv.vfmv.s.f.nxv8f64(, double, i64) - -define @intrinsic_vfmv.s.f_f_nxv8f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu -; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.s.f.nxv8f64( %0, double %1, i64 %2) - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll similarity index 74% rename from llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll index 4d47c000788f..8464dc2f6299 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll @@ -1,197 +1,200 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+d,+v,+zfh -target-abi ilp32d -verify-machineinstrs < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmv.s.f.nxv1f16(, half, i32) +declare @llvm.riscv.vfmv.s.f.nxv1f16(, half, iXLen) -define @intrinsic_vfmv.s.f_f_nxv1f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv1f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv1f16( %0, half %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv1f16( %0, half %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv2f16(, half, i32) +declare @llvm.riscv.vfmv.s.f.nxv2f16(, half, iXLen) -define @intrinsic_vfmv.s.f_f_nxv2f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv2f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv2f16( %0, half %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv2f16( %0, half %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv4f16(, half, i32) +declare @llvm.riscv.vfmv.s.f.nxv4f16(, half, 
iXLen) -define @intrinsic_vfmv.s.f_f_nxv4f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv4f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv4f16( %0, half %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv4f16( %0, half %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv8f16(, half, i32) +declare @llvm.riscv.vfmv.s.f.nxv8f16(, half, iXLen) -define @intrinsic_vfmv.s.f_f_nxv8f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv8f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv8f16( %0, half %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv8f16( %0, half %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv16f16(, half, i32) +declare @llvm.riscv.vfmv.s.f.nxv16f16(, half, iXLen) -define @intrinsic_vfmv.s.f_f_nxv16f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv16f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv16f16( %0, half %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv16f16( %0, half %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv32f16(, half, i32) +declare @llvm.riscv.vfmv.s.f.nxv32f16(, half, iXLen) -define @intrinsic_vfmv.s.f_f_nxv32f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv32f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv32f16( %0, half %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv32f16( %0, half %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv1f32(, float, i32) +declare @llvm.riscv.vfmv.s.f.nxv1f32(, float, iXLen) -define @intrinsic_vfmv.s.f_f_nxv1f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv1f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv1f32( %0, float %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv1f32( %0, float %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv2f32(, float, i32) +declare @llvm.riscv.vfmv.s.f.nxv2f32(, float, iXLen) -define @intrinsic_vfmv.s.f_f_nxv2f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv2f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv2f32( %0, float %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv2f32( %0, float %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv4f32(, float, i32) +declare @llvm.riscv.vfmv.s.f.nxv4f32(, float, iXLen) -define @intrinsic_vfmv.s.f_f_nxv4f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv4f32( %0, float %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv4f32( %0, float %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv4f32( %0, float %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv8f32(, float, i32) +declare @llvm.riscv.vfmv.s.f.nxv8f32(, float, iXLen) -define @intrinsic_vfmv.s.f_f_nxv8f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv8f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv8f32( %0, float %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv8f32( %0, float %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv16f32(, float, i32) +declare @llvm.riscv.vfmv.s.f.nxv16f32(, float, iXLen) -define @intrinsic_vfmv.s.f_f_nxv16f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv16f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv16f32( %0, float %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv16f32( %0, float %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv1f64(, double, i32) +declare @llvm.riscv.vfmv.s.f.nxv1f64(, double, iXLen) -define @intrinsic_vfmv.s.f_f_nxv1f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv1f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv1f64( %0, double %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv1f64( %0, double %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv2f64(, double, i32) +declare @llvm.riscv.vfmv.s.f.nxv2f64(, double, iXLen) -define @intrinsic_vfmv.s.f_f_nxv2f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv2f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv2f64( %0, double %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv2f64( %0, double %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv4f64(, double, i32) +declare @llvm.riscv.vfmv.s.f.nxv4f64(, double, iXLen) -define @intrinsic_vfmv.s.f_f_nxv4f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv4f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv4f64( %0, double %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv4f64( %0, double %1, iXLen %2) ret %a } -declare @llvm.riscv.vfmv.s.f.nxv8f64(, double, i32) +declare @llvm.riscv.vfmv.s.f.nxv8f64(, double, iXLen) -define @intrinsic_vfmv.s.f_f_nxv8f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfmv.s.f_f_nxv8f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu ; 
CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfmv.s.f.nxv8f64( %0, double %1, i32 %2) + %a = call @llvm.riscv.vfmv.s.f.nxv8f64( %0, double %1, iXLen %2) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll deleted file mode 100644 index 1c4d9f6e89dc..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll +++ /dev/null @@ -1,482 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -target-abi lp64d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfmv.v.f.nxv1f16( - half, - i64); - -define @intrinsic_vfmv.v.f_f_nxv1f16(half %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv1f16( - half %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv2f16( - half, - i64); - -define @intrinsic_vfmv.v.f_f_nxv2f16(half %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv2f16( - half %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv4f16( - half, - i64); - -define @intrinsic_vfmv.v.f_f_nxv4f16(half %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv4f16( - half %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv8f16( - half, - i64); - -define @intrinsic_vfmv.v.f_f_nxv8f16(half %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv8f16( - half %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv16f16( - half, - i64); - -define @intrinsic_vfmv.v.f_f_nxv16f16(half %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv16f16( - half %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv32f16( - half, - i64); - -define @intrinsic_vfmv.v.f_f_nxv32f16(half %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv32f16( - half %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv1f32( - float, - i64); - -define @intrinsic_vfmv.v.f_f_nxv1f32(float %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv1f32( - float %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv2f32( - float, - i64); - -define @intrinsic_vfmv.v.f_f_nxv2f32(float %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv2f32( - float %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv4f32( - float, - i64); - -define @intrinsic_vfmv.v.f_f_nxv4f32(float %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv4f32( - float %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv8f32( - float, - i64); - -define @intrinsic_vfmv.v.f_f_nxv8f32(float %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv8f32( - float %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv16f32( - float, - i64); - -define @intrinsic_vfmv.v.f_f_nxv16f32(float %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv16f32( - float %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv1f64( - double, - i64); - -define @intrinsic_vfmv.v.f_f_nxv1f64(double %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv1f64( - double %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv2f64( - double, - i64); - -define @intrinsic_vfmv.v.f_f_nxv2f64(double %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv2f64( - double %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv4f64( - double, - i64); - -define @intrinsic_vfmv.v.f_f_nxv4f64(double %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv4f64( - double %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfmv.v.f.nxv8f64( - double, - i64); - -define @intrinsic_vfmv.v.f_f_nxv8f64(double %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv8f64( - double %0, - i64 %1) - - ret %a -} - -define @intrinsic_vfmv.v.f_zero_nxv1f16(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv1f16( - half 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv2f16(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv2f16( - half 0.0, - i64 %0) - - ret %a -} - -define 
@intrinsic_vmv.v.i_zero_nxv4f16(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv4f16( - half 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv8f16(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv8f16( - half 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv16f16(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv16f16( - half 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv32f16(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv32f16( - half 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv1f32(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv1f32( - float 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv2f32(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv2f32( - float 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv4f32(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv4f32( - float 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv8f32(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv8f32( - float 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv16f32(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv16f32( - float 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv1f64(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv1f64( - double 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv2f64(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv2f64( - double 0.0, - i64 %0) - - ret %a -} - -define 
@intrinsic_vmv.v.i_zero_nxv4f64(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv4f64( - double 0.0, - i64 %0) - - ret %a -} - -define @intrinsic_vmv.v.i_zero_nxv8f64(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfmv.v.f.nxv8f64( - double 0.0, - i64 %0) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll similarity index 82% rename from llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll index b4acc57dcd81..6e0613e3e49b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -target-abi ilp32d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfmv.v.f.nxv1f16( half, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv1f16(half %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv1f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -14,16 +16,16 @@ define @intrinsic_vfmv.v.f_f_nxv1f16(half %0, i32 %1) nounwi entry: %a = call @llvm.riscv.vfmv.v.f.nxv1f16( half %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv2f16( half, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv2f16(half %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv2f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -32,16 +34,16 @@ define @intrinsic_vfmv.v.f_f_nxv2f16(half %0, i32 %1) nounwi entry: %a = call @llvm.riscv.vfmv.v.f.nxv2f16( half %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv4f16( half, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv4f16(half %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv4f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -50,16 +52,16 @@ define @intrinsic_vfmv.v.f_f_nxv4f16(half %0, i32 %1) nounwi entry: %a = call @llvm.riscv.vfmv.v.f.nxv4f16( half %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv8f16( half, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv8f16(half %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv8f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -68,16 +70,16 @@ define @intrinsic_vfmv.v.f_f_nxv8f16(half %0, i32 %1) nounwi entry: %a = call @llvm.riscv.vfmv.v.f.nxv8f16( half %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv16f16( half, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv16f16(half %0, i32 %1) nounwind { 
+define @intrinsic_vfmv.v.f_f_nxv16f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -86,16 +88,16 @@ define @intrinsic_vfmv.v.f_f_nxv16f16(half %0, i32 %1) noun entry: %a = call @llvm.riscv.vfmv.v.f.nxv16f16( half %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv32f16( half, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv32f16(half %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv32f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -104,16 +106,16 @@ define @intrinsic_vfmv.v.f_f_nxv32f16(half %0, i32 %1) noun entry: %a = call @llvm.riscv.vfmv.v.f.nxv32f16( half %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv1f32( float, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv1f32(float %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv1f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -122,16 +124,16 @@ define @intrinsic_vfmv.v.f_f_nxv1f32(float %0, i32 %1) noun entry: %a = call @llvm.riscv.vfmv.v.f.nxv1f32( float %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv2f32( float, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv2f32(float %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv2f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -140,16 +142,16 @@ define @intrinsic_vfmv.v.f_f_nxv2f32(float %0, i32 %1) noun entry: %a = call @llvm.riscv.vfmv.v.f.nxv2f32( float %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv4f32( float, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv4f32(float %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv4f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -158,16 +160,16 @@ define @intrinsic_vfmv.v.f_f_nxv4f32(float %0, i32 %1) noun entry: %a = call @llvm.riscv.vfmv.v.f.nxv4f32( float %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv8f32( float, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv8f32(float %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv8f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -176,16 +178,16 @@ define @intrinsic_vfmv.v.f_f_nxv8f32(float %0, i32 %1) noun entry: %a = call @llvm.riscv.vfmv.v.f.nxv8f32( float %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv16f32( float, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv16f32(float %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv16f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -194,16 +196,16 @@ define @intrinsic_vfmv.v.f_f_nxv16f32(float %0, i32 %1) no entry: %a = call @llvm.riscv.vfmv.v.f.nxv16f32( float %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv1f64( double, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv1f64(double %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv1f64(double %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64: ; CHECK: 
# %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -212,16 +214,16 @@ define @intrinsic_vfmv.v.f_f_nxv1f64(double %0, i32 %1) no entry: %a = call @llvm.riscv.vfmv.v.f.nxv1f64( double %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv2f64( double, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv2f64(double %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv2f64(double %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -230,16 +232,16 @@ define @intrinsic_vfmv.v.f_f_nxv2f64(double %0, i32 %1) no entry: %a = call @llvm.riscv.vfmv.v.f.nxv2f64( double %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv4f64( double, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv4f64(double %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv4f64(double %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -248,16 +250,16 @@ define @intrinsic_vfmv.v.f_f_nxv4f64(double %0, i32 %1) no entry: %a = call @llvm.riscv.vfmv.v.f.nxv4f64( double %0, - i32 %1) + iXLen %1) ret %a } declare @llvm.riscv.vfmv.v.f.nxv8f64( double, - i32); + iXLen); -define @intrinsic_vfmv.v.f_f_nxv8f64(double %0, i32 %1) nounwind { +define @intrinsic_vfmv.v.f_f_nxv8f64(double %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -266,12 +268,12 @@ define @intrinsic_vfmv.v.f_f_nxv8f64(double %0, i32 %1) no entry: %a = call @llvm.riscv.vfmv.v.f.nxv8f64( double %0, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vfmv.v.f_zero_nxv1f16(i32 %0) nounwind { +define @intrinsic_vfmv.v.f_zero_nxv1f16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -280,12 +282,12 @@ define @intrinsic_vfmv.v.f_zero_nxv1f16(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv1f16( half 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv2f16(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv2f16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -294,12 +296,12 @@ define @intrinsic_vmv.v.i_zero_nxv2f16(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv2f16( half 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv4f16(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv4f16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -308,12 +310,12 @@ define @intrinsic_vmv.v.i_zero_nxv4f16(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv4f16( half 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv8f16(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv8f16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -322,12 +324,12 @@ define @intrinsic_vmv.v.i_zero_nxv8f16(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv8f16( half 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv16f16(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv16f16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f16: ; CHECK: # %bb.0: # %entry 
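; A minimal sketch of the merge pattern used throughout this change, shown on
; a hypothetical @example function (the function name and its CHECK lines are
; illustrative only; the intrinsic and RUN lines are taken from this patch).
; The test body is written once against the placeholder type iXLen, and each
; RUN line rewrites the placeholder to the target's XLen before llc parses it:
;
;   ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
;   ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
;   ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
;   ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
;
;   declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(half, iXLen);
;
;   define <vscale x 1 x half> @example(half %0, iXLen %1) nounwind {
;   entry:
;     %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(half %0, iXLen %1)
;     ret <vscale x 1 x half> %a
;   }
;
; Because both targets take the vector length in a0 and select the same
; vsetvli/vfmv.v.f sequence, a single set of CHECK lines covers rv32 and rv64.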
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -336,12 +338,12 @@ define @intrinsic_vmv.v.i_zero_nxv16f16(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv16f16( half 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv32f16(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv32f16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -350,12 +352,12 @@ define @intrinsic_vmv.v.i_zero_nxv32f16(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv32f16( half 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv1f32(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv1f32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -364,12 +366,12 @@ define @intrinsic_vmv.v.i_zero_nxv1f32(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv1f32( float 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv2f32(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv2f32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -378,12 +380,12 @@ define @intrinsic_vmv.v.i_zero_nxv2f32(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv2f32( float 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv4f32(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv4f32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -392,12 +394,12 @@ define @intrinsic_vmv.v.i_zero_nxv4f32(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv4f32( float 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv8f32(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv8f32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -406,12 +408,12 @@ define @intrinsic_vmv.v.i_zero_nxv8f32(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv8f32( float 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv16f32(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv16f32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -420,12 +422,12 @@ define @intrinsic_vmv.v.i_zero_nxv16f32(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv16f32( float 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv1f64(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv1f64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -434,12 +436,12 @@ define @intrinsic_vmv.v.i_zero_nxv1f64(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv1f64( double 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv2f64(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv2f64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -448,12 +450,12 @@ define @intrinsic_vmv.v.i_zero_nxv2f64(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv2f64( double 0.0, - i32 %0) + iXLen %0) ret %a } -define 
@intrinsic_vmv.v.i_zero_nxv4f64(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv4f64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -462,12 +464,12 @@ define @intrinsic_vmv.v.i_zero_nxv4f64(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv4f64( double 0.0, - i32 %0) + iXLen %0) ret %a } -define @intrinsic_vmv.v.i_zero_nxv8f64(i32 %0) nounwind { +define @intrinsic_vmv.v.i_zero_nxv8f64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -476,7 +478,7 @@ define @intrinsic_vmv.v.i_zero_nxv8f64(i32 %0) nounwind { entry: %a = call @llvm.riscv.vfmv.v.f.nxv8f64( double 0.0, - i32 %0) + iXLen %0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll deleted file mode 100644 index 2e35f75eb89c..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll +++ /dev/null @@ -1,380 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32( - , - i64); - -define @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32( - , - i64); - -define @intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32( - , - i64); - -define @intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32( - %0, - i64 %1) - - 
ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32( - , - i64); - -define @intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32( - , - i64); - -define @intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64( - , - i64); - -define @intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64( - , - i64); - -define @intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64(
-    <vscale x 2 x double> %0,
-    i64 %1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64(
-  <vscale x 2 x float>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64(<vscale x 2 x float> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64(
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64(<vscale x 4 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64(
-    <vscale x 4 x double> %0,
-    i64 %1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64(
-  <vscale x 4 x float>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64(<vscale x 4 x float> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64(
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64(<vscale x 8 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64(
-    <vscale x 8 x double> %0,
-    i64 %1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64(
-  <vscale x 8 x float>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64(<vscale x 8 x float> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
index 014ff81bada1..e757261e7363 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
   <vscale x 1 x float>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x float> %0, i32
%1) nounwind { +define @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -15,7 +17,7 @@ define @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32( @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -24,10 +26,10 @@ declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -38,16 +40,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -57,7 +59,7 @@ define @intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32( @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -66,10 +68,10 @@ declare @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -80,16 +82,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32( @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32( @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32( %0, %1, %2, i32 %3) 
nounwind { +define @intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32( , - i32); + iXLen); -define @intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32( @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64( @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64( @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64( %0, iXLen 
%1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64( @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64( @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -374,7 +376,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll deleted file mode 100644 index d8d55df8e458..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll +++ /dev/null @@ -1,380 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32( - , - i64); - -define @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32( - , - i64); - -define @intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32( - , - i64); - -define @intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32( - , - i64); - -define @intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32( - , - i64); - -define @intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64( - , - i64); - -define @intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64( %0, i64 %1) nounwind { -; CHECK-LABEL: 
intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64( - , - i64); - -define @intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64( - , - i64); - -define @intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64( - , - i64); - -define @intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfncvt.f.x.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64( - %0, - %1, 
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll
index ab5f66bf692f..eedc7c163399 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32(<vscale x 1
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
     <vscale x 1 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x half> %a
 }
@@ -24,10 +26,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
   <vscale x 1 x half>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -38,16 +40,16 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
 
 declare <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32(
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x half> @intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x half> @intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32(<vscale x 2
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32(
     <vscale x 2 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x half> %a
 }
@@ -66,10 +68,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32(
   <vscale x 2 x half>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -80,16 +82,16 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
 
 declare <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32(
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x half> @intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -99,7 +101,7 @@ define <vscale x 4 x half> @intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32(<vscale x 4
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32(
     <vscale x 4 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x half> %a
 }
@@ -108,10 +110,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32(
   <vscale x 4 x half>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32
%3) nounwind { +define @intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32( , - i32); + iXLen); -define @intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32( @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32( , - i32); + iXLen); -define @intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32( @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64( @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64( %0, 
iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64( @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64( @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64( @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -374,7 +376,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll deleted file mode 100644 index 32a23932b074..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll +++ /dev/null @@ -1,380 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32( - , - i64); - -define @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v9, v8 
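; A note on the RUN-line change applied to every merged file, with the example
; command below being an illustrative reconstruction rather than part of the
; patch. The old per-XLen files passed -mattr=+v,+d,+zfh and left the ABI at
; its soft-float default; the merged files drop +d (the V extension already
; implies the D extension at this point) and pin -target-abi=ilp32d /
; -target-abi=lp64d instead. With a hard-float ABI on both targets, the scalar
; half/float/double operands arrive in fa0 regardless of XLen, so both llc
; invocations emit identical assembly and can share one set of CHECK lines.
; One configuration can be reproduced by hand, e.g.:
;
;   sed 's/iXLen/i64/g' llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll \
;     | llc -mtriple=riscv64 -mattr=+v,+zfh -target-abi=lp64d -verify-machineinstrs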
-; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32( - , - i64); - -define @intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32( - , - i64); - -define @intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32( - , - i64); - -define @intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32( - , - i64); - -define 
@intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64( - , - i64); - -define @intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64( - , - i64); - -define @intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64( - , - i64); - -define @intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.f.xu.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; 
CHECK-NEXT:    vfncvt.f.xu.w v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64(
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfncvt.f.xu.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64(
-    <vscale x 8 x i64> %0,
-    i64 %1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64(
-  <vscale x 8 x float>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfncvt.f.xu.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll
index 4835d4e5c591..e6842b749492 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32(<vscale x
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
     <vscale x 1 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x half> %a
 }
@@ -24,10 +26,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32(
   <vscale x 1 x half>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -38,16 +40,16 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
 
 declare <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32(
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x half> @intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x half> @intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32(<vscale x
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32(
     <vscale x 2 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x half> %a
 }
@@ -66,10 +68,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32(
   <vscale x 2 x half>,
   <vscale x 2 x i32>,
, - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -80,16 +82,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32( , - i32); + iXLen); -define @intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32( @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32( , - i32); + iXLen); -define @intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32( @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32( , - i32); + iXLen); -define @intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32( @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64( , - i32); + 
iXLen); -define @intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64( @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64( @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64( @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64( , - i32); + iXLen); -define @intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64( @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64( , 
, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -374,7 +376,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll deleted file mode 100644 index 4020c1d5d1a3..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll +++ /dev/null @@ -1,380 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32( - , - i64); - -define @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32( - , - i64); - -define @intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32( - , - i64); - -define @intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32( - , - i64); - -define @intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32( - , - i64); - -define @intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64( - , - i64); - -define @intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64( - , - i64); - -define @intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64( - , - i64); - -define @intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64( - , - i64); - -define @intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfncvt.rod.f.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll similarity index 86% rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll index b464fdde6db2..2a7c30939f10 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32( , - i32); + iXLen); -define 
@intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -15,7 +17,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32( @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -24,10 +26,10 @@ declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -38,16 +40,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -57,7 +59,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32( @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -66,10 +68,10 @@ declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -80,16 +82,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32( @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32( @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } 
@@ -150,10 +152,10 @@ declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32( @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32 , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64( @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64( @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64( @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64( @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -374,7 +376,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll deleted file mode 100644 index ad695704aae0..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll +++ /dev/null @@ -1,632 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16( - , - , - , - i64, - 
i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: 
intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64( - , - i64); - -define @intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll index 227210e6f2f0..9a14df186dd5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll @@ -1,11 +1,13 @@ 
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -15,7 +17,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16( @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -24,10 +26,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -38,16 +40,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -57,7 +59,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16( @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -66,10 +68,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -80,16 +82,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16( @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare 
@llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16( @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16( @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16( @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -267,7 +269,7 @@ define 
@intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32( @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32( @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32( @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -374,16 +376,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -393,7 +395,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32( @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -402,10 +404,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32( %0, %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -416,16 +418,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -435,7 +437,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32( @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -444,10 +446,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -458,16 +460,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -477,7 +479,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64( @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -486,10 +488,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -500,16 +502,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -519,7 +521,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64( @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -528,10 +530,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -542,16 +544,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64( %0, i32 %1) nounwind { +define 
@intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -561,7 +563,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64( @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -570,10 +572,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -584,16 +586,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -603,7 +605,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64( @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -612,10 +614,10 @@ declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -626,7 +628,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll deleted file mode 100644 index f7f873dd0515..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll +++ /dev/null @@ -1,632 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: 
vfncvt.rtz.xu.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64( - %0, - %1, - %2, - i64 %3, i64 1) - - 
ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64( - , - i64); - -define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll index 4bfe331db7e0..24d75d6d09b1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < 
%s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -15,7 +17,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -24,10 +26,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -38,16 +40,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -57,7 +59,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -66,10 +68,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -80,16 +82,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16( %0, i32 %1) nounwind { 
+define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 
@@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -374,16 +376,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -393,7 +395,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -402,10 +404,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a0, e16, m2, ta, mu @@ -416,16 +418,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -435,7 +437,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -444,10 +446,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -458,16 +460,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -477,7 +479,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -486,10 +488,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -500,16 +502,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -519,7 +521,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -528,10 +530,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -542,16 +544,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -561,7 +563,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -570,10 +572,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -584,16 +586,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64( , - i32); + iXLen); -define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -603,7 +605,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -612,10 +614,10 @@ declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -626,7 +628,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll deleted file mode 100644 index d78d695f3df2..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll +++ /dev/null @@ -1,632 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16( %0, i64 %1) nounwind { -; CHECK-LABEL: 
intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32( - %0, - 
%1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64( - , - i64); - -define @intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfncvt.x.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll index 6c97455f1992..0b8f9c62e50a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -15,7 +17,7 @@ define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16( @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -24,10 +26,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16( , , , - i32, - i32); + iXLen, + 
iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -38,16 +40,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -57,7 +59,7 @@ define @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16( @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -66,10 +68,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -80,16 +82,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16( @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16( @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16( %0, i32 %1) nounwind { +define 
@intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16( @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16( @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32( @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32( @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32( %0, %1, %2, i32 
%3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32( @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -374,16 +376,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -393,7 +395,7 @@ define @intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32( @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -402,10 +404,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -416,16 +418,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -435,7 +437,7 @@ define @intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32( @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -444,10 +446,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -458,16 +460,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64( %0, 
iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -477,7 +479,7 @@ define @intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64( @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -486,10 +488,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -500,16 +502,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -519,7 +521,7 @@ define @intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64( @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -528,10 +530,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -542,16 +544,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -561,7 +563,7 @@ define @intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64( @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -570,10 +572,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -584,16 +586,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64( , - i32); + iXLen); -define @intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -603,7 +605,7 @@ define @intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64( @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -612,10 +614,10 @@ declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64( %0, %1, %2, i32 %3) nounwind { +define 
@intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -626,7 +628,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll deleted file mode 100644 index c7bb913f3797..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll +++ /dev/null @@ -1,632 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare 
@llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v16, v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32( - %0, - i64 %1) - - ret %a -} - -declare 
@llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v10, v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v12, v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64( - , - , - , - i64, - i64); - -define @intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfncvt.xu.f.w v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64( - , - i64); - -define @intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64: -; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64(
-    <vscale x 8 x double> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64(
-  <vscale x 8 x i32>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64(<vscale x 8 x i32> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfncvt.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll
index 4981f8b16d74..7d802cabd3f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16(<vscale x 1
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -24,10 +26,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
   <vscale x 1 x i8>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -38,16 +40,16 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
 
 declare <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16(
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16(<vscale x 2
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16(
     <vscale x 2 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -66,10 +68,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16(
   <vscale x 2 x i8>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x i8> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x i8> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -80,16 +82,16 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16( @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16( @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16( @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16( @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 
@@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32( @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32( @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32( @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -374,16 +376,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare 
@llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -393,7 +395,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32( @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -402,10 +404,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -416,16 +418,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -435,7 +437,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32( @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -444,10 +446,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -458,16 +460,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -477,7 +479,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64( @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -486,10 +488,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -500,16 +502,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -519,7 +521,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64( @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64( %0, - i32 %1) + iXLen %1) ret %a } @@ 
-528,10 +530,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -542,16 +544,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -561,7 +563,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64( @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -570,10 +572,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -584,16 +586,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64( , - i32); + iXLen); -define @intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64( %0, i32 %1) nounwind { +define @intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -603,7 +605,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64( @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -612,10 +614,10 @@ declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -626,7 +628,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll deleted file mode 100644 index f8419e81f7d0..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll +++ /dev/null @@ -1,1106 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfnmacc.nxv1f16.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv1f16.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16( - , - , - , - , - i64); - -define 
@intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv2f16.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv2f16.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv4f16.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv4f16.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv8f16.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv8f16.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv16f16.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: 
vfnmacc.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv16f16.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv1f32.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv2f32.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv4f32.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv8f32.nxv8f32( - , - , - , - i64); - -define 
@intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv1f64.nxv1f64( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv2f64.nxv2f64( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv4f64.nxv4f64( - , - , - , - i64); - -define @intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64( - , - , - , - , - i64); - -define @intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfnmacc.vv v8, 
v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv1f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv1f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv1f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv1f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv2f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv2f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv2f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv2f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv4f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv4f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv4f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv4f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv8f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv8f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv8f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, 
half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv8f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv16f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv16f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv16f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv16f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv1f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv1f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv1f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv2f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv2f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv2f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv4f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - 
%a = call @llvm.riscv.vfnmacc.nxv4f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv4f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv8f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv8f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv8f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv1f64.f64( - , - double, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv1f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv1f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv2f64.f64( - , - double, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.nxv2f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmacc.mask.nxv2f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmacc.nxv4f64.f64( - , - double, - , - i64); - -define @intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64( 
<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmacc.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll
similarity index 90%
rename from llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll
index fa1767202c12..d46c29f3be78 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x half> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+
iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfnmacc.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfnmacc.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfnmacc.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfnmacc.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define 
@intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfnmacc.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfnmacc.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfnmacc.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfnmacc.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind 
{ ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfnmacc.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfnmacc.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfnmacc.nxv1f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv1f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -594,7 +596,7 
@@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfnmacc.nxv2f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -616,7 +618,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv2f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfnmacc.nxv4f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -662,7 +664,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv4f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -686,7 +688,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfnmacc.nxv8f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -708,7 +710,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv8f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -732,7 +734,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfnmacc.nxv16f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -754,7 +756,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv16f16.f16( half, , , - i32); + iXLen); 
-define @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -778,7 +780,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vfnmacc.nxv1f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv1f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -833,9 +835,9 @@ declare @llvm.riscv.vfnmacc.nxv2f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -846,7 +848,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -856,9 +858,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv2f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -870,7 +872,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -879,9 +881,9 @@ declare @llvm.riscv.vfnmacc.nxv4f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -892,7 +894,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -902,9 +904,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv4f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -916,7 +918,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -925,9 +927,9 @@ declare @llvm.riscv.vfnmacc.nxv8f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +define 
@intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -938,7 +940,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv8f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -962,7 +964,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -971,9 +973,9 @@ declare @llvm.riscv.vfnmacc.nxv1f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -984,7 +986,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -994,9 +996,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv1f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -1008,7 +1010,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1017,9 +1019,9 @@ declare @llvm.riscv.vfnmacc.nxv2f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1030,7 +1032,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1040,9 +1042,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv2f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1054,7 +1056,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1063,9 +1065,9 @@ declare @llvm.riscv.vfnmacc.nxv4f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1076,7 +1078,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1086,9 +1088,9 @@ declare @llvm.riscv.vfnmacc.mask.nxv4f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) 
nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -1100,7 +1102,7 @@ entry:
     double %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
deleted file mode 100644
index ab407427952a..000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
+++ /dev/null
@@ -1,1106 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    i64 %3)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    i64 %3)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
- i64); - -define @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv8f16.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv16f16.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv16f16.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv1f32.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv2f32.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, 
tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv4f32.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv8f32.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv1f64.nxv1f64( - , - , - , - i64); - -define @intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv2f64.nxv2f64( - , - , - , - i64); - -define @intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64( - , - , - , - , - i64); - -define 
@intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv4f64.nxv4f64( - , - , - , - i64); - -define @intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64( - , - , - , - , - i64); - -define @intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfnmadd.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv1f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv1f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv1f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv1f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv2f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv2f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv2f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv2f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv4f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, 
fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv4f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv4f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv4f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv8f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv8f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv8f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv8f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv16f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv16f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv16f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv16f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv1f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv1f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv1f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv2f32.f32( - , - float, - , - i64); - -define 
@intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv2f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv2f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv4f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv4f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv4f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv8f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv8f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv8f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.mask.nxv8f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmadd.nxv1f64.f64( - , - double, - , - i64); - -define @intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmadd.nxv1f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmadd.mask.nxv1f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: 
vfnmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64(
-    <vscale x 1 x double> %0,
-    double %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64(
-  <vscale x 2 x double>,
-  double,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64(
-    <vscale x 2 x double> %0,
-    double %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfnmadd.vf v8, fa0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll
similarity index 90%
rename from llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll
index e0d33062322b..44810af5ab31 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define
@intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfnmadd.nxv2f16.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfnmadd.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfnmadd.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfnmadd.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfnmadd.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfnmadd.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfnmadd.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare 
@llvm.riscv.vfnmadd.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfnmadd.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfnmadd.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfnmadd.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define 
@intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfnmadd.nxv1f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv1f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfnmadd.nxv2f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -616,7 +618,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv2f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfnmadd.nxv4f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -662,7 +664,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv4f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -686,7 +688,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfnmadd.nxv8f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -708,7 +710,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv8f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -732,7 +734,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfnmadd.nxv16f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -754,7 +756,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv16f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -778,7 +780,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vfnmadd.nxv1f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv1f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -833,9 +835,9 @@ declare @llvm.riscv.vfnmadd.nxv2f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -846,7 +848,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -856,9 +858,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv2f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -870,7 +872,7 @@ entry: float %1, %2, 
%3, - i32 %4) + iXLen %4) ret %a } @@ -879,9 +881,9 @@ declare @llvm.riscv.vfnmadd.nxv4f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -892,7 +894,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -902,9 +904,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv4f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -916,7 +918,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -925,9 +927,9 @@ declare @llvm.riscv.vfnmadd.nxv8f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -938,7 +940,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv8f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -962,7 +964,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -971,9 +973,9 @@ declare @llvm.riscv.vfnmadd.nxv1f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -984,7 +986,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -994,9 +996,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv1f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -1008,7 +1010,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1017,9 +1019,9 @@ declare @llvm.riscv.vfnmadd.nxv2f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1030,7 +1032,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1040,9 +1042,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv2f64.f64( double, , , - 
i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1054,7 +1056,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1063,9 +1065,9 @@ declare @llvm.riscv.vfnmadd.nxv4f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1076,7 +1078,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1086,9 +1088,9 @@ declare @llvm.riscv.vfnmadd.mask.nxv4f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1100,7 +1102,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll deleted file mode 100644 index 58e489618bc4..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll +++ /dev/null @@ -1,1106 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfnmsac.nxv1f16.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv1f16.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv2f16.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv2f16.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv4f16.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv4f16.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv8f16.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv8f16.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv16f16.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv16f16.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv1f32.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32( 
- , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv2f32.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv4f32.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv8f32.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv1f64.nxv1f64( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, 
m1, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv2f64.nxv2f64( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv4f64.nxv4f64( - , - , - , - i64); - -define @intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64( - , - , - , - , - i64); - -define @intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfnmsac.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv1f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv1f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv1f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv1f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv2f16.f16( - , - half, - , - i64); 
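
For orientation while reading these deleted per-XLen files: the ".vf" tests pass the scalar operand in fa0 and the vector length in a0, and the patch replaces the hard-coded i32/i64 vl type with an iXLen placeholder that sed instantiates before llc runs. Below is a minimal sketch of one such test in the merged style this patch introduces (illustrative only, not part of the patch; the vector types, which the tests mangle into names like nxv1f16, are written out here as the corresponding <vscale x 1 x half> per LLVM's naming scheme):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.f16(
  <vscale x 1 x half>,
  half,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfnmsac.vf v8, fa0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.f16(
    <vscale x 1 x half> %0,
    half %1,
    <vscale x 1 x half> %2,
    iXLen %3)
  ret <vscale x 1 x half> %a
}

Because the IR is textually identical on both targets once iXLen is substituted, a single FileCheck prefix covers riscv32 and riscv64, which is what lets the rv32/rv64 file pairs collapse into one.
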
- -define @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv2f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv2f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv2f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv4f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv4f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv4f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv4f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv8f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv8f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv8f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv8f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv16f16.f16( - , - half, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv16f16.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv16f16.f16( - , - half, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf 
v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv16f16.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv1f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv1f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv1f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv2f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv2f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv2f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv4f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv4f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv4f32.f32( - , - float, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv4f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv8f32.f32( - , - float, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv8f32.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv8f32.f32( - , - float, - , - , - i64); - -define 
@intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv8f32.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv1f64.f64( - , - double, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv1f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv1f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv1f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv2f64.f64( - , - double, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv2f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv2f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv2f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsac.nxv4f64.f64( - , - double, - , - i64); - -define @intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.nxv4f64.f64( - %0, - double %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsac.mask.nxv4f64.f64( - , - double, - , - , - i64); - -define @intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsac.mask.nxv4f64.f64( - %0, - double %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll similarity index 90% rename from llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll index 834938c7d6e7..ff1bcfa86d3a 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfnmsac.nxv1f16.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfnmsac.nxv2f16.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfnmsac.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfnmsac.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define 
@intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfnmsac.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfnmsac.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfnmsac.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfnmsac.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfnmsac.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfnmsac.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfnmsac.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -478,7 +480,7 @@ 
entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfnmsac.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfnmsac.nxv1f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv1f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfnmsac.nxv2f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -616,7 +618,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv2f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfnmsac.nxv4f16.f16( , half, , - i32); + iXLen); 
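
The masked variants in the hunks above follow the same pattern but add a mask operand (an <vscale x N x i1> value that lands in v0 and shows up as the v0.t suffix in the CHECK lines). For reference, a sketch of the masked counterpart of the test above under the new iXLen scheme (again illustrative, with the vector and mask types written out explicitly):

declare <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  half,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfnmsac.vf v8, fa0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    half %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)
  ret <vscale x 1 x half> %a
}
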
-define @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -662,7 +664,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv4f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -686,7 +688,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfnmsac.nxv8f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -708,7 +710,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv8f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -732,7 +734,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfnmsac.nxv16f16.f16( , half, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -754,7 +756,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv16f16.f16( half, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -778,7 +780,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vfnmsac.nxv1f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv1f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +define 
@intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -833,9 +835,9 @@ declare @llvm.riscv.vfnmsac.nxv2f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -846,7 +848,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -856,9 +858,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv2f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -870,7 +872,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -879,9 +881,9 @@ declare @llvm.riscv.vfnmsac.nxv4f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -892,7 +894,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -902,9 +904,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv4f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -916,7 +918,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -925,9 +927,9 @@ declare @llvm.riscv.vfnmsac.nxv8f32.f32( , float, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -938,7 +940,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv8f32.f32( float, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -962,7 +964,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -971,9 +973,9 @@ declare @llvm.riscv.vfnmsac.nxv1f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -984,7 +986,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -994,9 +996,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv1f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -1008,7 +1010,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1017,9 +1019,9 @@ declare @llvm.riscv.vfnmsac.nxv2f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1030,7 +1032,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1040,9 +1042,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv2f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1054,7 +1056,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1063,9 +1065,9 @@ declare @llvm.riscv.vfnmsac.nxv4f64.f64( , double, , - i32); + iXLen); -define @intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i32 %3) nounwind { +define @intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1076,7 +1078,7 @@ entry: %0, double %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1086,9 +1088,9 @@ declare @llvm.riscv.vfnmsac.mask.nxv4f64.f64( double, , , - i32); + iXLen); -define @intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1100,7 +1102,7 @@ entry: double %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll deleted file mode 100644 index 67dbb5a92dfa..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll +++ /dev/null @@ -1,1106 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s -declare @llvm.riscv.vfnmsub.nxv1f16.nxv1f16( - , - , - , - i32); - -define @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv1f16.nxv1f16( - %0, - 
%1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16( - , - , - , - , - i32); - -define @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv2f16.nxv2f16( - , - , - , - i32); - -define @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv2f16.nxv2f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16( - , - , - , - , - i32); - -define @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv4f16.nxv4f16( - , - , - , - i32); - -define @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv4f16.nxv4f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16( - , - , - , - , - i32); - -define @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv8f16.nxv8f16( - , - , - , - i32); - -define @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.nxv8f16.nxv8f16( - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16( - , - , - , - , - i32); - -define @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfnmsub.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vfnmsub.nxv16f16.nxv16f16( - , - , - , - i32); - -define @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x half> %2,
-    i32 %3)
-
-  ret <vscale x 16 x half> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll
similarity index 90%
rename from llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll
index 07b23dbfb066..e6ca32f34752 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x half> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1>
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfnmsub.nxv4f16.nxv4f16( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfnmsub.nxv8f16.nxv8f16( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfnmsub.nxv16f16.nxv16f16( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfnmsub.nxv1f32.nxv1f32( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu 
@@ -248,7 +250,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfnmsub.nxv2f32.nxv2f32( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfnmsub.nxv4f32.nxv4f32( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfnmsub.nxv8f32.nxv8f32( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfnmsub.nxv1f64.nxv1f64( , , , - i64); + iXLen); 
-define @intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfnmsub.nxv2f64.nxv2f64( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfnmsub.nxv4f64.nxv4f64( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64( , , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfnmsub.nxv1f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv1f16.f16( half, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen 
%4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfnmsub.nxv2f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -616,7 +618,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv2f16.f16( half, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfnmsub.nxv4f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -662,7 +664,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv4f16.f16( half, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -686,7 +688,7 @@ entry: half %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfnmsub.nxv8f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -708,7 +710,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv8f16.f16( half, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -732,7 +734,7 @@ entry: half %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfnmsub.nxv16f16.f16( , half, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ 
-754,7 +756,7 @@ entry: %0, half %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv16f16.f16( half, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -778,7 +780,7 @@ entry: half %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vfnmsub.nxv1f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv1f32.f32( float, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -833,9 +835,9 @@ declare @llvm.riscv.vfnmsub.nxv2f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -846,7 +848,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -856,9 +858,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv2f32.f32( float, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -870,7 +872,7 @@ entry: float %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -879,9 +881,9 @@ declare @llvm.riscv.vfnmsub.nxv4f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -892,7 +894,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -902,9 +904,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv4f32.f32( float, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -916,7 +918,7 @@ entry: float %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -925,9 +927,9 @@ declare 
@llvm.riscv.vfnmsub.nxv8f32.f32( , float, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -938,7 +940,7 @@ entry: %0, float %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv8f32.f32( float, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -962,7 +964,7 @@ entry: float %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -971,9 +973,9 @@ declare @llvm.riscv.vfnmsub.nxv1f64.f64( , double, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -984,7 +986,7 @@ entry: %0, double %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -994,9 +996,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv1f64.f64( double, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -1008,7 +1010,7 @@ entry: double %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1017,9 +1019,9 @@ declare @llvm.riscv.vfnmsub.nxv2f64.f64( , double, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1030,7 +1032,7 @@ entry: %0, double %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -1040,9 +1042,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv2f64.f64( double, , , - i64); + iXLen); -define @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -1054,7 +1056,7 @@ entry: double %1, %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1063,9 +1065,9 @@ declare @llvm.riscv.vfnmsub.nxv4f64.f64( , double, , - i64); + iXLen); -define @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { +define @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -1076,7 +1078,7 @@ entry: %0, double %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -1086,9 +1088,9 @@ declare @llvm.riscv.vfnmsub.mask.nxv4f64.f64( double, , , - i64); + iXLen); -define 
@intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -1100,7 +1102,7 @@ entry:
     double %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x double> %a
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll
deleted file mode 100644
index ccdd6ad37189..000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll
+++ /dev/null
@@ -1,677 +0,0 @@
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll
index 1d502c84b198..58dc39b99505 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfrdiv.mask.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfrdiv_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfrdiv_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x half> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half>
%a } @@ -116,10 +118,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -229,9 +231,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv32f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv32f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -251,10 +253,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv32f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a0, e16, m8, ta, mu @@ -266,7 +268,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -274,9 +276,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -286,7 +288,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -296,10 +298,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -311,7 +313,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -319,9 +321,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -331,7 +333,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -341,10 +343,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -356,7 +358,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -364,9 +366,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -376,7 +378,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -386,10 +388,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -401,7 +403,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -409,9 +411,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32: 
; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -421,7 +423,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -431,10 +433,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -446,7 +448,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -454,9 +456,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv16f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -466,7 +468,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv16f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -476,10 +478,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv16f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -491,7 +493,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -499,9 +501,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv1f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -511,7 +513,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv1f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -521,10 +523,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv1f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -536,7 +538,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -544,9 +546,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv2f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -556,7 +558,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv2f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -566,10 +568,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv2f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { 
+define @intrinsic_vfrdiv_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -581,7 +583,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -589,9 +591,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv4f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -601,7 +603,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv4f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -611,10 +613,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv4f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -626,7 +628,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -634,9 +636,9 @@ entry: declare @llvm.riscv.vfrdiv.nxv8f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -646,7 +648,7 @@ entry: %a = call @llvm.riscv.vfrdiv.nxv8f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -656,10 +658,10 @@ declare @llvm.riscv.vfrdiv.mask.nxv8f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrdiv_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfrdiv_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -671,7 +673,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll deleted file mode 100644 index 4e0fe7b9fc62..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll +++ /dev/null @@ -1,617 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfrec7.nxv1f16( - , - i64); - -define @intrinsic_vfrec7_v_nxv1f16_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfrec7.v v8, v9, v0.t -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv1f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv2f16( - , - i64); - -define @intrinsic_vfrec7_v_nxv2f16_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfrec7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv2f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv4f16( - , - i64); - -define @intrinsic_vfrec7_v_nxv4f16_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfrec7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv4f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv8f16( - , - i64); - -define @intrinsic_vfrec7_v_nxv8f16_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfrec7.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv8f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv16f16( - , - i64); - -define @intrinsic_vfrec7_v_nxv16f16_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfrec7.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv16f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv32f16( - , - i64); - -define @intrinsic_vfrec7_v_nxv32f16_nxv32f16( %0, i64 %1) nounwind { 
-; CHECK-LABEL: intrinsic_vfrec7_v_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv32f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv32f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfrec7.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv32f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv1f32( - , - i64); - -define @intrinsic_vfrec7_v_nxv1f32_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfrec7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv1f32( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv2f32( - , - i64); - -define @intrinsic_vfrec7_v_nxv2f32_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfrec7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv2f32( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv4f32( - , - i64); - -define @intrinsic_vfrec7_v_nxv4f32_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfrec7.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv4f32( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv8f32( - , - i64); - -define @intrinsic_vfrec7_v_nxv8f32_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv8f32( - %0, - i64 %1) - 
- ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfrec7.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv8f32( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv16f32( - , - i64); - -define @intrinsic_vfrec7_v_nxv16f32_nxv16f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv16f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv16f32( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfrec7.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv16f32( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv1f64( - , - i64); - -define @intrinsic_vfrec7_v_nxv1f64_nxv1f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv1f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv1f64( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfrec7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv1f64( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv2f64( - , - i64); - -define @intrinsic_vfrec7_v_nxv2f64_nxv2f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv2f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv2f64( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfrec7.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv2f64( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv4f64( - , - i64); - -define @intrinsic_vfrec7_v_nxv4f64_nxv4f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv4f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv4f64( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfrec7.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv4f64( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrec7.nxv8f64( - , - i64); - -define @intrinsic_vfrec7_v_nxv8f64_nxv8f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfrec7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.nxv8f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrec7.mask.nxv8f64( - , - , - , - i64, - i64); - -define @intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfrec7.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrec7.mask.nxv8f64( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfrec7.ll index 30897b95deea..3be9f912d7f2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfrec7.nxv1f16( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv1f16_nxv1f16( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv1f16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -14,7 +16,7 @@ define @intrinsic_vfrec7_v_nxv1f16_nxv1f16( @llvm.riscv.vfrec7.nxv1f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -23,10 +25,10 @@ declare @llvm.riscv.vfrec7.mask.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -37,16 +39,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv2f16( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv2f16_nxv2f16( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv2f16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -55,7 +57,7 @@ define @intrinsic_vfrec7_v_nxv2f16_nxv2f16( @llvm.riscv.vfrec7.nxv2f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -64,10 +66,10 @@ declare @llvm.riscv.vfrec7.mask.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16: ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -78,16 +80,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv4f16( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv4f16_nxv4f16( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv4f16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -96,7 +98,7 @@ define @intrinsic_vfrec7_v_nxv4f16_nxv4f16( @llvm.riscv.vfrec7.nxv4f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -105,10 +107,10 @@ declare @llvm.riscv.vfrec7.mask.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -119,16 +121,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv8f16( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv8f16_nxv8f16( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv8f16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -137,7 +139,7 @@ define @intrinsic_vfrec7_v_nxv8f16_nxv8f16( @llvm.riscv.vfrec7.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -146,10 +148,10 @@ declare @llvm.riscv.vfrec7.mask.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -160,16 +162,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv16f16_nxv16f16( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv16f16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -178,7 +180,7 @@ define @intrinsic_vfrec7_v_nxv16f16_nxv16f16( @llvm.riscv.vfrec7.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -187,10 +189,10 @@ declare @llvm.riscv.vfrec7.mask.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -201,16 +203,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv32f16( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv32f16_nxv32f16( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv32f16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -219,7 +221,7 @@ define @intrinsic_vfrec7_v_nxv32f16_nxv32f16( @llvm.riscv.vfrec7.nxv32f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -228,10 +230,10 @@ declare @llvm.riscv.vfrec7.mask.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define 
@intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -242,16 +244,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv1f32_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv1f32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -260,7 +262,7 @@ define @intrinsic_vfrec7_v_nxv1f32_nxv1f32( @llvm.riscv.vfrec7.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -269,10 +271,10 @@ declare @llvm.riscv.vfrec7.mask.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -283,16 +285,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv2f32_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv2f32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -301,7 +303,7 @@ define @intrinsic_vfrec7_v_nxv2f32_nxv2f32( @llvm.riscv.vfrec7.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vfrec7.mask.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -324,16 +326,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv4f32_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv4f32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -342,7 +344,7 @@ define @intrinsic_vfrec7_v_nxv4f32_nxv4f32( @llvm.riscv.vfrec7.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -351,10 +353,10 @@ declare @llvm.riscv.vfrec7.mask.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -365,16 +367,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv8f32_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv8f32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -383,7 +385,7 @@ define 
@intrinsic_vfrec7_v_nxv8f32_nxv8f32( @llvm.riscv.vfrec7.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -392,10 +394,10 @@ declare @llvm.riscv.vfrec7.mask.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -406,16 +408,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv16f32( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv16f32_nxv16f32( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv16f32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -424,7 +426,7 @@ define @intrinsic_vfrec7_v_nxv16f32_nxv16f32( @llvm.riscv.vfrec7.nxv16f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -433,10 +435,10 @@ declare @llvm.riscv.vfrec7.mask.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -447,16 +449,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv1f64( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv1f64_nxv1f64( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv1f64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -465,7 +467,7 @@ define @intrinsic_vfrec7_v_nxv1f64_nxv1f64( @llvm.riscv.vfrec7.nxv1f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -474,10 +476,10 @@ declare @llvm.riscv.vfrec7.mask.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -488,16 +490,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv2f64( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv2f64_nxv2f64( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv2f64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -506,7 +508,7 @@ define @intrinsic_vfrec7_v_nxv2f64_nxv2f64( @llvm.riscv.vfrec7.nxv2f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -515,10 +517,10 @@ declare @llvm.riscv.vfrec7.mask.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -529,16 +531,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv4f64( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv4f64_nxv4f64( %0, i32 %1) nounwind { +define 
@intrinsic_vfrec7_v_nxv4f64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -547,7 +549,7 @@ define @intrinsic_vfrec7_v_nxv4f64_nxv4f64( @llvm.riscv.vfrec7.nxv4f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -556,10 +558,10 @@ declare @llvm.riscv.vfrec7.mask.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -570,16 +572,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrec7.nxv8f64( , - i32); + iXLen); -define @intrinsic_vfrec7_v_nxv8f64_nxv8f64( %0, i32 %1) nounwind { +define @intrinsic_vfrec7_v_nxv8f64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -588,7 +590,7 @@ define @intrinsic_vfrec7_v_nxv8f64_nxv8f64( @llvm.riscv.vfrec7.nxv8f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -597,10 +599,10 @@ declare @llvm.riscv.vfrec7.mask.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -611,7 +613,7 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll deleted file mode 100644 index bb173e91ecf3..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll +++ /dev/null @@ -1,692 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfredmax.nxv4f16.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv4f16.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv4f16.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv4f16.nxv2f16( - %0, - %1, - %2, - i64 %3) - - 
ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv4f16.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv4f16.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv4f16.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv4f16.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv4f16.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv4f16.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv4f16.nxv32f16( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv4f16.nxv32f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv2f32.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv2f32.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv2f32.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv2f32.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv2f32.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv2f32.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv2f32.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv2f32.nxv16f32( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv2f32.nxv16f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv1f64.nxv1f64( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv1f64.nxv1f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv1f64.nxv2f64( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv1f64.nxv2f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64( 
%0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv1f64.nxv4f64( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv1f64.nxv4f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmax.nxv1f64.nxv8f64( - , - , - , - i64); - -define @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.nxv1f64.nxv8f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64( - , - , - , - , - i64); - -define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu -; CHECK-NEXT: vfredmax.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll similarity index 88% rename from llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfredmax.ll index 25ed3f1ab367..0a2d72bf382a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfredmax.nxv4f16.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 
@@ declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfredmax.nxv4f16.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfredmax.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfredmax.nxv4f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfredmax.nxv4f16.nxv16f16( , , , - i32); + iXLen); -define 
@intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfredmax.nxv4f16.nxv32f16( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfredmax.nxv2f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfredmax.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define 
@intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfredmax.nxv2f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfredmax.nxv2f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfredmax.nxv2f32.nxv16f32( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfredmax.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.nxv1i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfredmax.nxv1f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -570,7 +572,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -594,7 +596,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfredmax.nxv1f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -616,7 +618,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -640,7 +642,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfredmax.nxv1f64.nxv8f64( , , , - i32); + iXLen); -define @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu @@ -662,7 +664,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.nxv8i1( , , , - i32); + iXLen); -define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a0, e64, m8, tu, mu @@ -686,7 +688,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll deleted file mode 100644 index d04ef7a6707d..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll +++ /dev/null @@ -1,692 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfredmin.nxv4f16.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv4f16.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv4f16.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv4f16.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv4f16.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv4f16.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv4f16.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv4f16.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv4f16.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv4f16.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv4f16.nxv32f16( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv4f16.nxv32f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv2f32.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a 
= call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv2f32.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv2f32.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv2f32.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv2f32.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv2f32.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv2f32.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv2f32.nxv16f32( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv2f32.nxv16f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32( - , - , - , - , - i64); - -define 
@intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv1f64.nxv1f64( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv1f64.nxv1f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv1f64.nxv2f64( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv1f64.nxv2f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv1f64.nxv4f64( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv1f64.nxv4f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredmin.nxv1f64.nxv8f64( - , - , - , - i64); - -define @intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, 
m8, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.nxv1f64.nxv8f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64( - , - , - , - , - i64); - -define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu -; CHECK-NEXT: vfredmin.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll similarity index 88% rename from llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfredmin.ll index 9561be7b6fc0..4d0301d3485c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfredmin.nxv4f16.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfredmin.nxv4f16.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfredmin.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define 
@intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfredmin.nxv4f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfredmin.nxv4f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfredmin.nxv4f16.nxv32f16( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define 
@intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfredmin.nxv2f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfredmin.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfredmin.nxv2f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfredmin.nxv2f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfredmin.nxv2f32.nxv16f32( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfredmin.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.nxv1i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfredmin.nxv1f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -570,7 +572,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a0, e64, m2, tu, mu @@ -594,7 +596,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfredmin.nxv1f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -616,7 +618,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -640,7 +642,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfredmin.nxv1f64.nxv8f64( , , , - i32); + iXLen); -define @intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu @@ -662,7 +664,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.nxv8i1( , , , - i32); + iXLen); -define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu @@ -686,7 +688,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll deleted file mode 100644 index 8c42e43d9094..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll +++ /dev/null @@ -1,692 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfredosum.nxv4f16.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv4f16.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv4f16.nxv2f16( - , - , - , - i64); - -define 
@intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv4f16.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv4f16.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv4f16.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv4f16.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv4f16.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv4f16.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv4f16.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv4f16.nxv32f16( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv4f16.nxv32f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv2f32.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv2f32.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv2f32.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32( - %0, - %1, - 
%2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv2f32.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv2f32.nxv16f32( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv1f64.nxv1f64( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv1f64.nxv2f64( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64( 
%0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv1f64.nxv4f64( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredosum.nxv1f64.nxv8f64( - , - , - , - i64); - -define @intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64( - , - , - , - , - i64); - -define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu -; CHECK-NEXT: vfredosum.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll similarity index 88% rename from llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfredosum.ll index 1f1e68e0dbc9..b814315d90cd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: 
-verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfredosum.nxv4f16.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfredosum.nxv4f16.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfredosum.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfredosum.nxv4f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1( , , , - 
i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfredosum.nxv4f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfredosum.nxv4f16.nxv32f16( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfredosum.nxv2f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfredosum.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define 
@intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfredosum.nxv2f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfredosum.nxv2f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfredosum.nxv2f32.nxv16f32( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1 , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { 
+define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfredosum.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.nxv1i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfredosum.nxv1f64.nxv2f64( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -570,7 +572,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -594,7 +596,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfredosum.nxv1f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -616,7 +618,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -640,7 +642,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfredosum.nxv1f64.nxv8f64( , , , - i32); + iXLen); -define @intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu @@ -662,7 +664,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.nxv8i1( , , , - i32); + iXLen); -define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu @@ -686,7 +688,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredusum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredusum-rv64.ll deleted file mode 100644 index 9264397afa0e..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfredusum-rv64.ll +++ /dev/null @@ -1,692 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfredusum.nxv4f16.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv4f16.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv4f16.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv4f16.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv4f16.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv4f16.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare 
@llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv4f16.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv4f16.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv4f16.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv4f16.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv4f16.nxv32f16( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv4f16.nxv32f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv2f32.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; 
CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv2f32.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv2f32.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv2f32.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv2f32.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv2f32.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv2f32.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v12, v9, 
v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv2f32.nxv16f32( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv2f32.nxv16f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv1f64.nxv1f64( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv1f64.nxv1f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv1f64.nxv2f64( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv1f64.nxv2f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv1f64.nxv4f64( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv1f64.nxv4f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64( - , - , 
- , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfredusum.nxv1f64.nxv8f64( - , - , - , - i64); - -define @intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.nxv1f64.nxv8f64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64( - , - , - , - , - i64); - -define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu -; CHECK-NEXT: vfredusum.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredusum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll similarity index 88% rename from llvm/test/CodeGen/RISCV/rvv/vfredusum-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfredusum.ll index 7b3691c74887..e6ff649ab398 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfredusum-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfredusum.nxv4f16.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.nxv1i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfredusum.nxv4f16.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfredusum.nxv4f16.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfredusum.nxv4f16.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.nxv8i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfredusum.nxv4f16.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.nxv16i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 
@@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfredusum.nxv4f16.nxv32f16( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.nxv32i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfredusum.nxv2f32.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.nxv1i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfredusum.nxv2f32.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfredusum.nxv2f32.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare 
@llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfredusum.nxv2f32.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.nxv8i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfredusum.nxv2f32.nxv16f32( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.nxv16i1 , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfredusum.nxv1f64.nxv1f64( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.nxv1i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -548,7 +550,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfredusum.nxv1f64.nxv2f64( , , , - i32); + iXLen); 
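[Editor's sketch, not part of the patch: the merged-test pattern at full width, with the <vscale x ...> element types (inferred from the .nxv2f32.nxv2f32 name mangling; the extraction above dropped them) written out. "iXLen" is not a real IR type: each RUN line rewrites it with sed before llc parses the file, so one source now covers both XLens. The RUN lines are copied from the renamed vfredusum.ll above; the function name is hypothetical.]

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32(
  <vscale x 2 x float>,  ; dest/maskedoff
  <vscale x 2 x float>,  ; vector source
  <vscale x 2 x float>,  ; scalar vector (element 0 carries the start value)
  iXLen);                ; vl, rewritten to i32 or i64 by sed

define <vscale x 2 x float> @sketch_vfredusum(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    iXLen %3)
  ret <vscale x 2 x float> %a
}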
-define @intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -570,7 +572,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.nxv2i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -594,7 +596,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfredusum.nxv1f64.nxv4f64( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -616,7 +618,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.nxv4i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -640,7 +642,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfredusum.nxv1f64.nxv8f64( , , , - i32); + iXLen); -define @intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu @@ -662,7 +664,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.nxv8i1( , , , - i32); + iXLen); -define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu @@ -686,7 +688,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll deleted file mode 100644 index cf9d7a4c0af5..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll +++ /dev/null @@ -1,617 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfrsqrt7.nxv1f16( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu 
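[Editor's sketch for the vfrsqrt7 group: the masked variant is the one shape in this file where two scalar integers trail the operands, the vector length and a tail-policy immediate, and both are XLen-sized. That is why the rv32 and rv64 copies differed only in i32 vs. i64 and collapse cleanly under the iXLen substitution, becoming "iXLen %3, iXLen 1". A minimal sketch, assuming the usual operand order of these autogenerated tests; the types are reconstructed from the intrinsic name, not taken verbatim from the patch.]

declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
  <vscale x 1 x half>,  ; maskedoff
  <vscale x 1 x half>,  ; source operand
  <vscale x 1 x i1>,    ; mask
  iXLen,                ; vl
  iXLen);               ; policy immediate (1, matching the "ta" in the CHECK lines)

define <vscale x 1 x half> @sketch_vfrsqrt7_mask(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 1)
  ret <vscale x 1 x half> %a
}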
-; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv1f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv2f16( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv2f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv4f16( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv4f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv8f16( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv8f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv16f16( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare 
@llvm.riscv.vfrsqrt7.mask.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv16f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv32f16( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv32f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv32f16( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv32f16( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv1f32( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv1f32( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv2f32( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv2f32( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv4f32( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; 
CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv4f32( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv8f32( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv8f32( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv16f32( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv16f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv16f32( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv16f32( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv1f64( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv1f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv1f64( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv1f64( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv2f64( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv2f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv2f64( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, 
v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv2f64( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv4f64( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv4f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv4f64( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv4f64( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.nxv8f64( - , - i64); - -define @intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.nxv8f64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfrsqrt7.mask.nxv8f64( - , - , - , - i64, - i64); - -define @intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfrsqrt7.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsqrt7.mask.nxv8f64( - %1, - %2, - %0, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll index d0198a85b0c5..a521b6c2f2b5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfrsqrt7.nxv1f16( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -14,7 +16,7 @@ define @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16( @llvm.riscv.vfrsqrt7.nxv1f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -23,10 +25,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -37,16 +39,16 @@ entry: %1, %2, %0, 
- i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv2f16( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -55,7 +57,7 @@ define @intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16( @llvm.riscv.vfrsqrt7.nxv2f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -64,10 +66,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -78,16 +80,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv4f16( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -96,7 +98,7 @@ define @intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16( @llvm.riscv.vfrsqrt7.nxv4f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -105,10 +107,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -119,16 +121,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv8f16( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -137,7 +139,7 @@ define @intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16( @llvm.riscv.vfrsqrt7.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -146,10 +148,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -160,16 +162,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -178,7 +180,7 @@ define @intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16( @llvm.riscv.vfrsqrt7.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -187,10 +189,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) 
nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -201,16 +203,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv32f16( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -219,7 +221,7 @@ define @intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16( @llvm.riscv.vfrsqrt7.nxv32f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -228,10 +230,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -242,16 +244,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -260,7 +262,7 @@ define @intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32( @llvm.riscv.vfrsqrt7.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -269,10 +271,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -283,16 +285,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -301,7 +303,7 @@ define @intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32( @llvm.riscv.vfrsqrt7.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -324,16 +326,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -342,7 +344,7 @@ define 
@intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32( @llvm.riscv.vfrsqrt7.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -351,10 +353,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -365,16 +367,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -383,7 +385,7 @@ define @intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32( @llvm.riscv.vfrsqrt7.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -392,10 +394,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -406,16 +408,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv16f32( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -424,7 +426,7 @@ define @intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32( @llvm.riscv.vfrsqrt7.nxv16f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -433,10 +435,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -447,16 +449,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv1f64( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -465,7 +467,7 @@ define @intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64( @llvm.riscv.vfrsqrt7.nxv1f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -474,10 +476,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -488,16 +490,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv2f64( , - i32); + iXLen); -define 
@intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -506,7 +508,7 @@ define @intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64( @llvm.riscv.vfrsqrt7.nxv2f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -515,10 +517,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -529,16 +531,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv4f64( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -547,7 +549,7 @@ define @intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64( @llvm.riscv.vfrsqrt7.nxv4f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -556,10 +558,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -570,16 +572,16 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfrsqrt7.nxv8f64( , - i32); + iXLen); -define @intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64( %0, i32 %1) nounwind { +define @intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -588,7 +590,7 @@ define @intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64( @llvm.riscv.vfrsqrt7.nxv8f64( %0, - i32 %1) + iXLen %1) ret %a } @@ -597,10 +599,10 @@ declare @llvm.riscv.vfrsqrt7.mask.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -611,7 +613,7 @@ entry: %1, %2, %0, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll deleted file mode 100644 index 0477554c9141..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll +++ /dev/null @@ -1,678 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -mattr=+zfh \ -; RUN: -mattr=+d -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfrsub.nxv1f16.f16( - , - half, - i64); - -define @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: 
vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv1f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv1f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv2f16.f16( - , - half, - i64); - -define @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv2f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv2f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv4f16.f16( - , - half, - i64); - -define @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv4f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv4f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv8f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 
%2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv16f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv32f16.f16( - , - half, - i64); - -define @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv32f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv32f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv1f32.f32( - , - float, - i64); - -define @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfrsub.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv4f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv4f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv8f32.f32( - , - float, - i64); - -define @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv8f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv8f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv16f32.f32( - , - float, - i64); - -define @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv16f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv16f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv1f64.f64( - , - double, - i64); - -define @intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv1f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv1f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv2f64.f64( - , - double, - i64); - -define @intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv2f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv2f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv2f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv4f64.f64( - , - double, - i64); - -define @intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv4f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv4f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv4f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfrsub.nxv8f64.f64( - , - double, - i64); - -define @intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.nxv8f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfrsub.mask.nxv8f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfrsub.mask.nxv8f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll similarity index 86% rename from llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfrsub.ll index eab5b2a414e1..3fb281562088 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by 
utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfrsub.nxv1f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -16,7 +18,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv1f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -26,10 +28,10 @@ declare @llvm.riscv.vfrsub.mask.nxv1f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -41,7 +43,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -49,9 +51,9 @@ entry: declare @llvm.riscv.vfrsub.nxv2f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -61,7 +63,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv2f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -71,10 +73,10 @@ declare @llvm.riscv.vfrsub.mask.nxv2f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -86,7 +88,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -94,9 +96,9 @@ entry: declare @llvm.riscv.vfrsub.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -106,7 +108,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -116,10 +118,10 @@ declare @llvm.riscv.vfrsub.mask.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfrsub.nxv8f16.f16( , half, 
- i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfrsub.mask.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfrsub.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfrsub.mask.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -229,9 +231,9 @@ entry: declare @llvm.riscv.vfrsub.nxv32f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv32f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -251,10 +253,10 @@ declare @llvm.riscv.vfrsub.mask.nxv32f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -266,7 +268,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -274,9 +276,9 @@ entry: declare @llvm.riscv.vfrsub.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -286,7 +288,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) 
ret %a } @@ -296,10 +298,10 @@ declare @llvm.riscv.vfrsub.mask.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -311,7 +313,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -319,9 +321,9 @@ entry: declare @llvm.riscv.vfrsub.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -331,7 +333,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -341,10 +343,10 @@ declare @llvm.riscv.vfrsub.mask.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -356,7 +358,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -364,9 +366,9 @@ entry: declare @llvm.riscv.vfrsub.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -376,7 +378,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -386,10 +388,10 @@ declare @llvm.riscv.vfrsub.mask.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -401,7 +403,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -409,9 +411,9 @@ entry: declare @llvm.riscv.vfrsub.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -421,7 +423,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -431,10 +433,10 @@ declare @llvm.riscv.vfrsub.mask.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a0, e32, m4, ta, mu @@ -446,7 +448,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -454,9 +456,9 @@ entry: declare @llvm.riscv.vfrsub.nxv16f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -466,7 +468,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv16f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -476,10 +478,10 @@ declare @llvm.riscv.vfrsub.mask.nxv16f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -491,7 +493,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -499,9 +501,9 @@ entry: declare @llvm.riscv.vfrsub.nxv1f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -511,7 +513,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv1f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -521,10 +523,10 @@ declare @llvm.riscv.vfrsub.mask.nxv1f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -536,7 +538,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -544,9 +546,9 @@ entry: declare @llvm.riscv.vfrsub.nxv2f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -556,7 +558,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv2f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -566,10 +568,10 @@ declare @llvm.riscv.vfrsub.mask.nxv2f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -581,7 +583,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -589,9 +591,9 @@ entry: declare @llvm.riscv.vfrsub.nxv4f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -601,7 +603,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv4f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -611,10 +613,10 @@ declare @llvm.riscv.vfrsub.mask.nxv4f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -626,7 +628,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -634,9 +636,9 @@ entry: declare @llvm.riscv.vfrsub.nxv8f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -646,7 +648,7 @@ entry: %a = call @llvm.riscv.vfrsub.nxv8f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -656,10 +658,10 @@ declare @llvm.riscv.vfrsub.mask.nxv8f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -671,7 +673,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll deleted file mode 100644 index d71fb8fb2535..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll +++ /dev/null @@ -1,1355 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfsgnj.nxv1f16( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv1f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv1f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv2f16( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv2f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare 
@llvm.riscv.vfsgnj.mask.nxv2f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv4f16( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv4f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv4f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv8f16( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv8f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv8f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv16f16( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv16f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv16f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv32f16( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv32f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv32f16( - , - , - , - , - i64, - 
i64); - -define @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv1f32( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv1f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv1f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv2f32( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv2f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv2f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv4f32( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv4f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv4f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv8f32( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv8f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv8f32( - , - , - , - , - i64, - i64); - -define 
@intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv16f32( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv16f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv16f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv1f64( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv1f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv1f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv2f64( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv2f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv2f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv4f64( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv4f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv4f64( - , - , - , - , - i64, - i64); - -define 
@intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv8f64( - , - , - i64); - -define @intrinsic_vfsgnj_vv_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv8f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv8f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv8f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv1f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnj_vf_nxv1f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv1f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv1f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv2f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnj_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv2f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv2f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv4f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnj_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv4f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv4f16.f16( - , - , 
- half, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnj_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv8f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnj_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv16f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv32f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnj_vf_nxv32f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv32f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv32f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv1f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnj_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - 
-declare @llvm.riscv.vfsgnj.mask.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnj_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnj_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv4f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv4f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv8f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnj_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv8f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv8f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv16f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnj_vf_nxv16f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfsgnj.nxv16f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv16f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv1f64.f64( - , - double, - i64); - -define @intrinsic_vfsgnj_vf_nxv1f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv1f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv1f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv2f64.f64( - , - double, - i64); - -define @intrinsic_vfsgnj_vf_nxv2f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv2f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv2f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv2f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv4f64.f64( - , - double, - i64); - -define @intrinsic_vfsgnj_vf_nxv4f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv4f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv4f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv4f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnj.nxv8f64.f64( - , - double, - i64); - -define @intrinsic_vfsgnj_vf_nxv8f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, 
ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.nxv8f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnj.mask.nxv8f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsgnj_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnj.mask.nxv8f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll similarity index 86% rename from llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll index c0e999a2433d..65a5592775cb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfsgnj.nxv1f16.nxv1f16( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -16,7 +18,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv1f16.nxv1f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -26,10 +28,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -41,7 +43,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -49,9 +51,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv2f16.nxv2f16( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -61,7 +63,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv2f16.nxv2f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -71,10 +73,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv2f16.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -86,7 +88,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -94,9 +96,9 @@ entry: declare 
@llvm.riscv.vfsgnj.nxv4f16.nxv4f16( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -106,7 +108,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv4f16.nxv4f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -116,10 +118,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv4f16.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv8f16.nxv8f16( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv8f16.nxv8f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv8f16.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv16f16.nxv16f16( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv16f16.nxv16f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv16f16.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -229,9 +231,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv32f16.nxv32f16( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call 
@llvm.riscv.vfsgnj.nxv32f16.nxv32f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -251,10 +253,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv32f16.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -267,7 +269,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -275,9 +277,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv1f32.nxv1f32( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -287,7 +289,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -297,10 +299,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -312,7 +314,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -320,9 +322,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv2f32.nxv2f32( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -332,7 +334,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv2f32.nxv2f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -342,10 +344,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -357,7 +359,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -365,9 +367,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv4f32.nxv4f32( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -377,7 +379,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv4f32.nxv4f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -387,10 +389,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -402,7 +404,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -410,9 +412,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv8f32.nxv8f32( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -422,7 +424,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv8f32.nxv8f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -432,10 +434,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -447,7 +449,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -455,9 +457,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv16f32.nxv16f32( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -467,7 +469,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv16f32.nxv16f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -477,10 +479,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -493,7 +495,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -501,9 +503,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv1f64.nxv1f64( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -513,7 +515,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv1f64.nxv1f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -523,10 +525,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -538,7 +540,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -546,9 +548,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv2f64.nxv2f64( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -558,7 +560,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv2f64.nxv2f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -568,10 +570,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -583,7 +585,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -591,9 +593,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv4f64.nxv4f64( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -603,7 +605,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv4f64.nxv4f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -613,10 +615,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -628,7 +630,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -636,9 +638,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv8f64.nxv8f64( , , - i32); + iXLen); -define @intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -648,7 +650,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv8f64.nxv8f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -658,10 +660,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -674,7 +676,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -682,9 +684,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv1f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -694,7 +696,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv1f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -704,10 +706,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv1f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define 
@intrinsic_vfsgnj_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -719,7 +721,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -727,9 +729,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv2f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -739,7 +741,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv2f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -749,10 +751,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv2f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -764,7 +766,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -772,9 +774,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -784,7 +786,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -794,10 +796,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -809,7 +811,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -817,9 +819,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -829,7 +831,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -839,10 +841,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -854,7 +856,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -862,9 +864,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv16f16.f16( , half, - i32); + iXLen); -define 
@intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -874,7 +876,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -884,10 +886,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -899,7 +901,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -907,9 +909,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv32f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -919,7 +921,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv32f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -929,10 +931,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv32f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -944,7 +946,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -952,9 +954,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -964,7 +966,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -974,10 +976,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -989,7 +991,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -997,9 +999,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1009,7 +1011,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ 
-1019,10 +1021,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1034,7 +1036,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1042,9 +1044,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1054,7 +1056,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1064,10 +1066,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1079,7 +1081,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1087,9 +1089,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1099,7 +1101,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1109,10 +1111,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1124,7 +1126,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1132,9 +1134,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv16f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1144,7 +1146,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv16f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1154,10 +1156,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv16f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry 
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1169,7 +1171,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1177,9 +1179,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv1f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1189,7 +1191,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv1f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1199,10 +1201,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv1f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1214,7 +1216,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv2f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv2f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1244,10 +1246,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv2f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1259,7 +1261,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1267,9 +1269,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv4f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1279,7 +1281,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv4f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1289,10 +1291,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv4f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1304,7 +1306,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1312,9 +1314,9 @@ entry: declare @llvm.riscv.vfsgnj.nxv8f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64( %0, double 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1324,7 +1326,7 @@ entry: %a = call @llvm.riscv.vfsgnj.nxv8f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1334,10 +1336,10 @@ declare @llvm.riscv.vfsgnj.mask.nxv8f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnj_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnj_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1349,7 +1351,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll deleted file mode 100644 index f751fd740638..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll +++ /dev/null @@ -1,1355 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfsgnjn.nxv1f16( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv1f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv1f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv2f16( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv2f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv2f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv4f16( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv4f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv4f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16( %0, %1, 
%2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv8f16( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv8f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv8f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv16f16( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv16f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv16f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv32f16( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv32f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv32f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv1f32( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv1f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv1f32( - , - , - , - , - i64, - i64); - -define 
@intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv2f32( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv2f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv2f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv4f32( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv4f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv4f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv8f32( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv8f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv8f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv16f32( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv16f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv16f32( - , - , - , - , - i64, - i64); - -define 
@intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv1f64( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv1f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv1f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv2f64( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv2f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv2f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv4f64( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv4f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv4f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv8f64( - , - , - i64); - -define @intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv8f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv8f64( - , - , - , - , - i64, - i64); - -define 
@intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv8f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv1f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjn_vf_nxv1f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv1f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv1f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv2f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjn_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv2f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv2f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv4f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjn_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv4f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv4f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjn_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv8f16.f16( - %0, - half %1, - i64 %2) - - 
ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjn_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv16f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv32f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjn_vf_nxv32f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv32f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv32f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv1f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnjn_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnjn_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnjn_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv4f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv4f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv8f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnjn_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv8f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv8f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv16f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnjn_vf_nxv16f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv16f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv16f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv1f64.f64( - , - double, - i64); - -define @intrinsic_vfsgnjn_vf_nxv1f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f64_f64: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv1f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv1f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv2f64.f64( - , - double, - i64); - -define @intrinsic_vfsgnjn_vf_nxv2f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv2f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv2f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv2f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv4f64.f64( - , - double, - i64); - -define @intrinsic_vfsgnjn_vf_nxv4f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv4f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv4f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv4f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.nxv8f64.f64( - , - double, - i64); - -define @intrinsic_vfsgnjn_vf_nxv8f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.nxv8f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjn.mask.nxv8f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsgnjn_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjn.mask.nxv8f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll 
b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll
index 0287f9ea2cbf..f16c8a6db1fb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -116,10 +118,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define
@intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -229,9 +231,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -251,10 +253,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -267,7 +269,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -275,9 +277,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32( , , - i32); 
+ iXLen); -define @intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -287,7 +289,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -297,10 +299,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -312,7 +314,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -320,9 +322,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -332,7 +334,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -342,10 +344,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -357,7 +359,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -365,9 +367,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -377,7 +379,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -387,10 +389,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -402,7 +404,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -410,9 +412,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -422,7 +424,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ 
-432,10 +434,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -447,7 +449,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -455,9 +457,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -467,7 +469,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -477,10 +479,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -493,7 +495,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -501,9 +503,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -513,7 +515,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -523,10 +525,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -538,7 +540,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -546,9 +548,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -558,7 +560,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -568,10 +570,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a0, e64, m2, ta, mu @@ -583,7 +585,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -591,9 +593,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -603,7 +605,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -613,10 +615,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -628,7 +630,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -636,9 +638,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64( , , - i32); + iXLen); -define @intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -648,7 +650,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -658,10 +660,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -674,7 +676,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -682,9 +684,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv1f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -694,7 +696,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv1f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -704,10 +706,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv1f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -719,7 +721,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -727,9 +729,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv2f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -739,7 +741,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv2f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -749,10 +751,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv2f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -764,7 +766,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -772,9 +774,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -784,7 +786,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -794,10 +796,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -809,7 +811,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -817,9 +819,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -829,7 +831,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -839,10 +841,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -854,7 +856,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -862,9 +864,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -874,7 +876,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -884,10 +886,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define 
@intrinsic_vfsgnjn_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -899,7 +901,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -907,9 +909,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv32f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -919,7 +921,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv32f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -929,10 +931,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv32f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -944,7 +946,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -952,9 +954,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -964,7 +966,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -974,10 +976,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -989,7 +991,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -997,9 +999,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1009,7 +1011,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1019,10 +1021,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1034,7 +1036,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1042,9 +1044,9 @@ entry: declare 
@llvm.riscv.vfsgnjn.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1054,7 +1056,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1064,10 +1066,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1079,7 +1081,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1087,9 +1089,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1099,7 +1101,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1109,10 +1111,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1124,7 +1126,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1132,9 +1134,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv16f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1144,7 +1146,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv16f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1154,10 +1156,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv16f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1169,7 +1171,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1177,9 +1179,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv1f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu 
@@ -1189,7 +1191,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv1f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1199,10 +1201,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv1f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1214,7 +1216,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv2f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv2f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1244,10 +1246,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv2f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1259,7 +1261,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1267,9 +1269,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv4f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1279,7 +1281,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv4f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1289,10 +1291,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv4f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjn_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1304,7 +1306,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1312,9 +1314,9 @@ entry: declare @llvm.riscv.vfsgnjn.nxv8f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsgnjn_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsgnjn_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1324,7 +1326,7 @@ entry: %a = call @llvm.riscv.vfsgnjn.nxv8f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1334,10 +1336,10 @@ declare @llvm.riscv.vfsgnjn.mask.nxv8f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjn_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define 
@intrinsic_vfsgnjn_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1349,7 +1351,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll deleted file mode 100644 index 4ae69f0d4f61..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll +++ /dev/null @@ -1,1355 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfsgnjx.nxv1f16( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv1f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv1f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv2f16( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv2f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv2f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv4f16( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv4f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv4f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv8f16( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv8f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv8f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv16f16( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv16f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv16f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv32f16( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv32f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv32f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv1f32( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv1f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv1f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv2f32( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32( %0, %1, i64 %2) 
nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv2f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv2f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv4f32( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv4f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv4f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv8f32( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv8f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv8f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv16f32( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv16f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv16f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv1f64( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64( %0, %1, 
i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv1f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv1f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv2f64( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv2f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv2f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv4f64( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv4f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv4f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv8f64( - , - , - i64); - -define @intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv8f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv8f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv8f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv1f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjx_vf_nxv1f16_f16( %0, half 
%1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv1f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv1f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv2f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjx_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv2f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv2f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv4f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjx_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv4f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv4f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjx_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv8f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare 
@llvm.riscv.vfsgnjx.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjx_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv16f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv32f16.f16( - , - half, - i64); - -define @intrinsic_vfsgnjx_vf_nxv32f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv32f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv32f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv1f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnjx_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnjx_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfsgnjx.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnjx_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv4f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv4f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv8f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnjx_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv8f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv8f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv16f32.f32( - , - float, - i64); - -define @intrinsic_vfsgnjx_vf_nxv16f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv16f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv16f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv1f64.f64( - , - double, - i64); - -define @intrinsic_vfsgnjx_vf_nxv1f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv1f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv1f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv2f64.f64( - , - double, - i64); - -define @intrinsic_vfsgnjx_vf_nxv2f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv2f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv2f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv2f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv4f64.f64( - , - double, - i64); - -define @intrinsic_vfsgnjx_vf_nxv4f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv4f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv4f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv4f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.nxv8f64.f64( - , - double, - i64); - -define @intrinsic_vfsgnjx_vf_nxv8f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.nxv8f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsgnjx.mask.nxv8f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsgnjx_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsgnjx.mask.nxv8f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll similarity index 86% rename from llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll index fba4eceb23fb..edfd578ce8aa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 
's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16( <vscale x 1 x half>, <vscale x 1 x half>, - i32); + iXLen); -define <vscale x 1 x half> @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind { +define <vscale x 1 x half> @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -16,7 +18,7 @@ entry: %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16( <vscale x 1 x half> %0, <vscale x 1 x half> %1, - i32 %2) + iXLen %2) ret <vscale x 1 x half> %a } @@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16( <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, - i32, - i32); + iXLen, + iXLen); -define <vscale x 1 x half> @intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { +define <vscale x 1 x half> @intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -41,7 +43,7 @@ entry: <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret <vscale x 1 x half> %a } @@ -49,9 +51,9 @@ entry: declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16( <vscale x 2 x half>, <vscale x 2 x half>, - i32); + iXLen); -define <vscale x 2 x half> @intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind { +define <vscale x 2 x half> @intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -61,7 +63,7 @@ entry: %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16( <vscale x 2 x half> %0, <vscale x 2 x half> %1, - i32 %2) + iXLen %2) ret <vscale x 2 x half> %a } @@ -71,10 +73,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16( <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x i1>, - i32, - i32); + iXLen, + iXLen); -define <vscale x 2 x half> @intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind { +define <vscale x 2 x half> @intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -86,7 +88,7 @@ entry: <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret <vscale x 2 x half> %a } @@ -94,9 +96,9 @@ entry: declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16( <vscale x 4 x half>, <vscale x 4 x half>, - i32); + iXLen); -define <vscale x 4 x half> @intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind { +define <vscale x 4 x half> @intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -106,7 +108,7 @@ entry: %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16( <vscale x 4 x half> %0, <vscale x 4 x half> %1, - i32 %2) + iXLen %2) ret <vscale x 4 x half> %a } @@ -116,10 +118,10 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16( <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x i1>, - i32, - i32); + iXLen, + iXLen); -define <vscale x 4 x half> @intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { +define <vscale x 4 x half> @intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret <vscale x 4 x half> %a } @@ -139,9 +141,9 @@ entry: declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16( <vscale x 8 x half>, <vscale x 8 x half>, - i32); + iXLen); -define <vscale x 8 x half> @intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind { +define <vscale x 8 x half>
@intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16( <vscale x 8 x half> %0, <vscale x 8 x half> %1, - i32 %2) + iXLen %2) ret <vscale x 8 x half> %a } @@ -161,10 +163,10 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16( <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, - i32, - i32); + iXLen, + iXLen); -define <vscale x 8 x half> @intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { +define <vscale x 8 x half> @intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret <vscale x 8 x half> %a } @@ -184,9 +186,9 @@ entry: declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16( <vscale x 16 x half>, <vscale x 16 x half>, - i32); + iXLen); -define <vscale x 16 x half> @intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind { +define <vscale x 16 x half> @intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16( <vscale x 16 x half> %0, <vscale x 16 x half> %1, - i32 %2) + iXLen %2) ret <vscale x 16 x half> %a } @@ -206,10 +208,10 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16( <vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x i1>, - i32, - i32); + iXLen, + iXLen); -define <vscale x 16 x half> @intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind { +define <vscale x 16 x half> @intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 @@ entry: <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret <vscale x 16 x half> %a } @@ -229,9 +231,9 @@ entry: declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16( <vscale x 32 x half>, <vscale x 32 x half>, - i32); + iXLen); -define <vscale x 32 x half> @intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind { +define <vscale x 32 x half> @intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16( <vscale x 32 x half> %0, <vscale x 32 x half> %1, - i32 %2) + iXLen %2) ret <vscale x 32 x half> %a } @@ -251,10 +253,10 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16( <vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x i1>, - i32, - i32); + iXLen, + iXLen); -define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind { +define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -267,7 +269,7 @@ entry: <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret <vscale x 32 x half> %a } @@ -275,9 +277,9 @@ entry: declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32( <vscale x 1 x float>, <vscale x 1 x float>, - i32); + iXLen); -define <vscale x 1 x float> @intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind { +define <vscale x 1 x float> @intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -287,7 +289,7 @@ entry: %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32( <vscale x 1 x float> %0, <vscale x 1 x float> %1, - i32 %2) + iXLen %2) ret <vscale x 1 x float> %a } @@ -297,10 +299,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32( <vscale x 1 x float>,
, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -312,7 +314,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -320,9 +322,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32( , , - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -332,7 +334,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -342,10 +344,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -357,7 +359,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -365,9 +367,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32( , , - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -377,7 +379,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -387,10 +389,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -402,7 +404,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -410,9 +412,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32( , , - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -422,7 +424,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -432,10 +434,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -447,7 +449,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + 
iXLen %4, iXLen 1) ret %a } @@ -455,9 +457,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32( , , - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -467,7 +469,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -477,10 +479,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -493,7 +495,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -501,9 +503,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64( , , - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -513,7 +515,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -523,10 +525,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -538,7 +540,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -546,9 +548,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64( , , - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -558,7 +560,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -568,10 +570,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -583,7 +585,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -591,9 +593,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64( , , - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu 
@@ -603,7 +605,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -613,10 +615,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -628,7 +630,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -636,9 +638,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64( , , - i32); + iXLen); -define @intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -648,7 +650,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -658,10 +660,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -674,7 +676,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -682,9 +684,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv1f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -694,7 +696,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv1f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -704,10 +706,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv1f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -719,7 +721,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -727,9 +729,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv2f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -739,7 +741,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv2f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -749,10 +751,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv2f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnjx_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -764,7 +766,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -772,9 +774,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -784,7 +786,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -794,10 +796,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -809,7 +811,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -817,9 +819,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -829,7 +831,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -839,10 +841,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -854,7 +856,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -862,9 +864,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -874,7 +876,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -884,10 +886,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -899,7 +901,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -907,9 +909,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv32f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +define 
@intrinsic_vfsgnjx_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -919,7 +921,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv32f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -929,10 +931,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv32f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -944,7 +946,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -952,9 +954,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -964,7 +966,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -974,10 +976,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -989,7 +991,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -997,9 +999,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1009,7 +1011,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1019,10 +1021,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1034,7 +1036,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1042,9 +1044,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1054,7 +1056,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1064,10 +1066,10 @@ declare 
@llvm.riscv.vfsgnjx.mask.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1079,7 +1081,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1087,9 +1089,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1099,7 +1101,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1109,10 +1111,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1124,7 +1126,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1132,9 +1134,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv16f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1144,7 +1146,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv16f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1154,10 +1156,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv16f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1169,7 +1171,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1177,9 +1179,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv1f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1189,7 +1191,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv1f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1199,10 +1201,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv1f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: 
# %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1214,7 +1216,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv2f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv2f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1244,10 +1246,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv2f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1259,7 +1261,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1267,9 +1269,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv4f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1279,7 +1281,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv4f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1289,10 +1291,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv4f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1304,7 +1306,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1312,9 +1314,9 @@ entry: declare @llvm.riscv.vfsgnjx.nxv8f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1324,7 +1326,7 @@ entry: %a = call @llvm.riscv.vfsgnjx.nxv8f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1334,10 +1336,10 @@ declare @llvm.riscv.vfsgnjx.mask.nxv8f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsgnjx_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsgnjx_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1349,7 +1351,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll deleted file mode 100644 index a0ba31ea2668..000000000000 --- 
a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll +++ /dev/null @@ -1,677 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfslide1down.nxv1f16.f16( - , - half, - i64); - -define @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv1f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv1f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv2f16.f16( - , - half, - i64); - -define @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv2f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv2f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv4f16.f16( - , - half, - i64); - -define @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv4f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv4f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; 
CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv8f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv16f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv32f16.f16( - , - half, - i64); - -define @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv32f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv32f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv1f32.f32( - , - float, - i64); - -define @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv4f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv4f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv8f32.f32( - , - float, - i64); - -define @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv8f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv8f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv16f32.f32( - , - float, - i64); - -define @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv16f32.f32( - 
%0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv16f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv1f64.f64( - , - double, - i64); - -define @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv1f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv1f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv2f64.f64( - , - double, - i64); - -define @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv2f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv2f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv2f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv4f64.f64( - , - double, - i64); - -define @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv4f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv4f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv4f64.f64( - %0, - %1, - double %2, - %3, - i64 
%4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1down.nxv8f64.f64( - , - double, - i64); - -define @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.nxv8f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1down.mask.nxv8f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1down.mask.nxv8f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll similarity index 85% rename from llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll index 5baadc48d857..6cbba483a8d9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfslide1down.nxv1f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -16,7 +18,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv1f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -26,10 +28,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv1f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -41,7 +43,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -49,9 +51,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv2f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -61,7 +63,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv2f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -71,10 +73,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv2f16.f16( 
, half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -86,7 +88,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -94,9 +96,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -106,7 +108,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -116,10 +118,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -229,9 +231,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv32f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv32f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -251,10 +253,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv32f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -266,7 +268,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -274,9 +276,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -286,7 +288,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -296,10 +298,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -311,7 +313,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -319,9 +321,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -331,7 +333,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -341,10 +343,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -356,7 +358,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -364,9 +366,9 @@ entry: declare 
@llvm.riscv.vfslide1down.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -376,7 +378,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -386,10 +388,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -401,7 +403,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -409,9 +411,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -421,7 +423,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -431,10 +433,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -446,7 +448,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -454,9 +456,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv16f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -466,7 +468,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv16f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -476,10 +478,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv16f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -491,7 +493,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -499,9 +501,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv1f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -511,7 +513,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv1f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -521,10 +523,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv1f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -536,7 +538,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -544,9 +546,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv2f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -556,7 +558,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv2f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -566,10 +568,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv2f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -581,7 +583,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -589,9 +591,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv4f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -601,7 +603,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv4f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -611,10 +613,10 @@ declare @llvm.riscv.vfslide1down.mask.nxv4f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -626,7 +628,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -634,9 +636,9 @@ entry: declare @llvm.riscv.vfslide1down.nxv8f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -646,7 +648,7 @@ entry: %a = call @llvm.riscv.vfslide1down.nxv8f64.f64( %0, double %1, - i32 %2) + iXLen 
%2)
 
   ret <vscale x 8 x double> %a
 }
 
@@ -656,10 +658,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -671,7 +673,7 @@ entry:
     <vscale x 8 x double> %1,
     double %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll
deleted file mode 100644
index 4b7d1fe55e24..000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll
+++ /dev/null
@@ -1,692 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
-  <vscale x 1 x half>,
-  half,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v9, v8, fa0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  half,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    half %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
-  <vscale x 2 x half>,
-  half,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v9, v8, fa0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfslide1up.mask.nxv2f16.f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  half,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.mask.nxv2f16.f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    half %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16(
-  <vscale x 4 x half>,
-  half,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v9, v8, fa0
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16(
-    <vscale x 4 x half> %0,
-    half %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfslide1up.mask.nxv4f16.f16(
, - , - half, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v10, v8, fa0 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv8f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfslide1up.vf v12, v8, fa0 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv16f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv32f16.f16( - , - half, - i64); - -define @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfslide1up.vf v16, v8, fa0 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv32f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv32f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv1f32.f32( - , - float, - 
i64); - -define @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 -; CHECK-NEXT: vmv.v.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v10, v8, fa0 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv4f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv4f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv8f32.f32( - , - float, - i64); - -define @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfslide1up.vf v12, v8, fa0 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv8f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv8f32.f32( - , - , - float, - , - i64, - i64); - -define 
@intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv16f32.f32( - , - float, - i64); - -define @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfslide1up.vf v16, v8, fa0 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv16f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv16f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv1f64.f64( - , - double, - i64); - -define @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 -; CHECK-NEXT: vmv.v.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv1f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv1f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv2f64.f64( - , - double, - i64); - -define @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v10, v8, fa0 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.nxv2f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfslide1up.mask.nxv2f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfslide1up.mask.nxv2f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfslide1up.nxv4f64.f64( - , - double, - i64); - -define 
<vscale x 4 x double> @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v12, v8, fa0
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  double,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    double %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
-  <vscale x 8 x double>,
-  double,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v16, v8, fa0
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    double %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfslide1up.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll
index 271bf70522bf..695cf7aab3f6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -17,7 +19,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -27,10 +29,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half>
@intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -42,7 +44,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -50,9 +52,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv2f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -63,7 +65,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv2f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -73,10 +75,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv2f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -88,7 +90,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -96,9 +98,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -109,7 +111,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -119,10 +121,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -134,7 +136,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -142,9 +144,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -155,7 +157,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -165,10 +167,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -180,7 +182,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -188,9 +190,9 @@ entry: 
declare @llvm.riscv.vfslide1up.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -201,7 +203,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -211,10 +213,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -226,7 +228,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -234,9 +236,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv32f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -247,7 +249,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv32f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -257,10 +259,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv32f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -272,7 +274,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -280,9 +282,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -293,7 +295,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -303,10 +305,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -318,7 +320,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -326,9 +328,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32: 
; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -339,7 +341,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -349,10 +351,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -364,7 +366,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -372,9 +374,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -385,7 +387,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -395,10 +397,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -410,7 +412,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -418,9 +420,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -431,7 +433,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -441,10 +443,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -456,7 +458,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -464,9 +466,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv16f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -477,7 +479,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv16f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -487,10 +489,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv16f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); 
-define @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -502,7 +504,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -510,9 +512,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv1f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -523,7 +525,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv1f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -533,10 +535,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv1f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -548,7 +550,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -556,9 +558,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv2f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -569,7 +571,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv2f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -579,10 +581,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv2f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -594,7 +596,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -602,9 +604,9 @@ entry: declare @llvm.riscv.vfslide1up.nxv4f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -615,7 +617,7 @@ entry: %a = call @llvm.riscv.vfslide1up.nxv4f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -625,10 +627,10 @@ declare @llvm.riscv.vfslide1up.mask.nxv4f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry 
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -640,7 +642,7 @@ entry:
     <vscale x 4 x double> %1,
     double %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -648,9 +650,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -661,7 +663,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
     <vscale x 8 x double> %0,
     double %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -671,10 +673,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -686,7 +688,7 @@ entry:
     <vscale x 8 x double> %1,
     double %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll
deleted file mode 100644
index 3b86fd763f3c..000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll
+++ /dev/null
@@ -1,548 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
-  <vscale x 1 x half>,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
-    <vscale x 1 x half> %0,
-    i32 %1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
-  <vscale x 2 x half>,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vfsqrt_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
-    <vscale x 2 x half> %0,
-    i32 %1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16(
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16(
-  <vscale x 4 x half>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfsqrt_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-;
CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv4f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.mask.nxv4f16( - , - , - , - i32, - i32); - -define @intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.mask.nxv4f16( - %1, - %2, - %0, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.nxv8f16( - , - i32); - -define @intrinsic_vfsqrt_v_nxv8f16_nxv8f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv8f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.mask.nxv8f16( - , - , - , - i32, - i32); - -define @intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.mask.nxv8f16( - %1, - %2, - %0, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.nxv16f16( - , - i32); - -define @intrinsic_vfsqrt_v_nxv16f16_nxv16f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv16f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.mask.nxv16f16( - , - , - , - i32, - i32); - -define @intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.mask.nxv16f16( - %1, - %2, - %0, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.nxv32f16( - , - i32); - -define @intrinsic_vfsqrt_v_nxv32f16_nxv32f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv32f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.nxv1f32( - , - i32); - -define @intrinsic_vfsqrt_v_nxv1f32_nxv1f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv1f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.mask.nxv1f32( - , - , - , - i32, - i32); - -define @intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.mask.nxv1f32( - %1, - %2, - %0, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.nxv2f32( - , - i32); - -define @intrinsic_vfsqrt_v_nxv2f32_nxv2f32( %0, i32 %1) 
nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv2f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.mask.nxv2f32( - , - , - , - i32, - i32); - -define @intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.mask.nxv2f32( - %1, - %2, - %0, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.nxv4f32( - , - i32); - -define @intrinsic_vfsqrt_v_nxv4f32_nxv4f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv4f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.mask.nxv4f32( - , - , - , - i32, - i32); - -define @intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.mask.nxv4f32( - %1, - %2, - %0, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.nxv8f32( - , - i32); - -define @intrinsic_vfsqrt_v_nxv8f32_nxv8f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv8f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.mask.nxv8f32( - , - , - , - i32, - i32); - -define @intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.mask.nxv8f32( - %1, - %2, - %0, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.nxv16f32( - , - i32); - -define @intrinsic_vfsqrt_v_nxv16f32_nxv16f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv16f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.nxv1f64( - , - i32); - -define @intrinsic_vfsqrt_v_nxv1f64_nxv1f64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.nxv1f64( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfsqrt.mask.nxv1f64( - , - , - , - i32, - i32); - -define @intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsqrt.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsqrt.mask.nxv1f64( - %1, - %2, - %0, - i32 %3, i32 
1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64(
-  <vscale x 2 x double>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfsqrt_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64(
-    <vscale x 2 x double> %0,
-    i32 %1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64(
-    <vscale x 2 x double> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64(
-  <vscale x 4 x double>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfsqrt_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64(
-    <vscale x 4 x double> %0,
-    i32 %1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64(
-    <vscale x 4 x double> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64(
-  <vscale x 8 x double>,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfsqrt_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64(
-    <vscale x 8 x double> %0,
-    i32 %1)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll
similarity index 80%
rename from llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll
index c810a516f3b3..d944375d645c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll
@@ -1,22 +1,22 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(
+define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 1 x half> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
     <vscale x 1 x half> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x half> %a
 }
@@ -25,45 +25,39 @@ declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(
+define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ;
CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v9, v0.t ; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { entry: %a = call @llvm.riscv.vfsqrt.mask.nxv1f16( %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfsqrt.nxv2f16( , - i64); + iXLen); -define @intrinsic_vfsqrt_v_nxv2f16_nxv2f16( +define @intrinsic_vfsqrt_v_nxv2f16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret - %0, - i64 %1) nounwind { entry: %a = call @llvm.riscv.vfsqrt.nxv2f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -72,45 +66,39 @@ declare @llvm.riscv.vfsqrt.mask.nxv2f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16( +define @intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v9, v0.t ; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { entry: %a = call @llvm.riscv.vfsqrt.mask.nxv2f16( %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfsqrt.nxv4f16( , - i64); + iXLen); -define @intrinsic_vfsqrt_v_nxv4f16_nxv4f16( +define @intrinsic_vfsqrt_v_nxv4f16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret - %0, - i64 %1) nounwind { entry: %a = call @llvm.riscv.vfsqrt.nxv4f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -119,45 +107,39 @@ declare @llvm.riscv.vfsqrt.mask.nxv4f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16( +define @intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v9, v0.t ; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { entry: %a = call @llvm.riscv.vfsqrt.mask.nxv4f16( %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfsqrt.nxv8f16( , - i64); + iXLen); -define @intrinsic_vfsqrt_v_nxv8f16_nxv8f16( +define @intrinsic_vfsqrt_v_nxv8f16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret - %0, - i64 %1) nounwind { entry: %a = call @llvm.riscv.vfsqrt.nxv8f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -166,45 +148,39 @@ declare @llvm.riscv.vfsqrt.mask.nxv8f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16( +define @intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v10, v0.t ; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { entry: %a = call @llvm.riscv.vfsqrt.mask.nxv8f16( %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfsqrt.nxv16f16( , - i64); + iXLen); -define @intrinsic_vfsqrt_v_nxv16f16_nxv16f16( +define @intrinsic_vfsqrt_v_nxv16f16_nxv16f16( %0, 
iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret - %0, - i64 %1) nounwind { entry: %a = call @llvm.riscv.vfsqrt.nxv16f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -213,45 +189,39 @@ declare @llvm.riscv.vfsqrt.mask.nxv16f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16( +define @intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v12, v0.t ; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { entry: %a = call @llvm.riscv.vfsqrt.mask.nxv16f16( %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfsqrt.nxv32f16( , - i64); + iXLen); -define @intrinsic_vfsqrt_v_nxv32f16_nxv32f16( +define @intrinsic_vfsqrt_v_nxv32f16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret - %0, - i64 %1) nounwind { entry: %a = call @llvm.riscv.vfsqrt.nxv32f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -260,45 +230,39 @@ declare @llvm.riscv.vfsqrt.mask.nxv32f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16( +define @intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v16, v0.t ; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { entry: %a = call @llvm.riscv.vfsqrt.mask.nxv32f16( %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfsqrt.nxv1f32( , - i64); + iXLen); -define @intrinsic_vfsqrt_v_nxv1f32_nxv1f32( +define @intrinsic_vfsqrt_v_nxv1f32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret - %0, - i64 %1) nounwind { entry: %a = call @llvm.riscv.vfsqrt.nxv1f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -307,45 +271,39 @@ declare @llvm.riscv.vfsqrt.mask.nxv1f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32( +define @intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v9, v0.t ; CHECK-NEXT: ret - %0, - %1, - %2, - i64 %3) nounwind { entry: %a = call @llvm.riscv.vfsqrt.mask.nxv1f32( %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfsqrt.nxv2f32( , - i64); + iXLen); -define @intrinsic_vfsqrt_v_nxv2f32_nxv2f32( +define @intrinsic_vfsqrt_v_nxv2f32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret - %0, - i64 %1) nounwind { entry: %a = call @llvm.riscv.vfsqrt.nxv2f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -354,45 +312,39 @@ declare @llvm.riscv.vfsqrt.mask.nxv2f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32( 
+define <vscale x 2 x float> @intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 2 x float> %0,
-  <vscale x 2 x float> %1,
-  <vscale x 2 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)

   ret <vscale x 2 x float> %a
 }

 declare <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32(
   <vscale x 4 x float>,
-  i64);
+  iXLen);

-define <vscale x 4 x float> @intrinsic_vfsqrt_v_nxv4f32_nxv4f32(
+define <vscale x 4 x float> @intrinsic_vfsqrt_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 4 x float> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32(
     <vscale x 4 x float> %0,
-    i64 %1)
+    iXLen %1)

   ret <vscale x 4 x float> %a
 }

@@ -401,45 +353,39 @@ declare <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);

-define <vscale x 4 x float> @intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32(
+define <vscale x 4 x float> @intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 4 x float> %0,
-  <vscale x 4 x float> %1,
-  <vscale x 4 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)

   ret <vscale x 4 x float> %a
 }

 declare <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32(
   <vscale x 8 x float>,
-  i64);
+  iXLen);

-define <vscale x 8 x float> @intrinsic_vfsqrt_v_nxv8f32_nxv8f32(
+define <vscale x 8 x float> @intrinsic_vfsqrt_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 8 x float> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32(
     <vscale x 8 x float> %0,
-    i64 %1)
+    iXLen %1)

   ret <vscale x 8 x float> %a
 }

@@ -448,45 +394,39 @@ declare <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);

-define <vscale x 8 x float> @intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32(
+define <vscale x 8 x float> @intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 8 x float> %0,
-  <vscale x 8 x float> %1,
-  <vscale x 8 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)

   ret <vscale x 8 x float> %a
 }

 declare <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32(
   <vscale x 16 x float>,
-  i64);
+  iXLen);

-define <vscale x 16 x float> @intrinsic_vfsqrt_v_nxv16f32_nxv16f32(
+define <vscale x 16 x float> @intrinsic_vfsqrt_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 16 x float> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32(
     <vscale x 16 x float> %0,
-    i64 %1)
+    iXLen %1)

   ret <vscale x 16 x float> %a
 }

@@ -495,45 +435,39 @@ declare <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);

-define <vscale x 16 x float> @intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32(
+define <vscale x 16 x float> @intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v16, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 16 x float> %0,
-  <vscale x 16 x float> %1,
-  <vscale x 16 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)

   ret <vscale x 16 x float> %a
 }

 declare <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64(
   <vscale x 1 x double>,
-  i64);
+  iXLen);

-define <vscale x 1 x double> @intrinsic_vfsqrt_v_nxv1f64_nxv1f64(
+define <vscale x 1 x double> @intrinsic_vfsqrt_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 1 x double> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64(
     <vscale x 1 x double> %0,
-    i64 %1)
+    iXLen %1)

   ret <vscale x 1 x double> %a
 }

@@ -542,45 +476,39 @@ declare <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);

-define <vscale x 1 x double> @intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64(
+define <vscale x 1 x double> @intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 1 x double> %0,
-  <vscale x 1 x double> %1,
-  <vscale x 1 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)

   ret <vscale x 1 x double> %a
 }

 declare <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64(
   <vscale x 2 x double>,
-  i64);
+  iXLen);

-define <vscale x 2 x double> @intrinsic_vfsqrt_v_nxv2f64_nxv2f64(
+define <vscale x 2 x double> @intrinsic_vfsqrt_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 2 x double> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64(
     <vscale x 2 x double> %0,
-    i64 %1)
+    iXLen %1)

   ret <vscale x 2 x double> %a
 }

@@ -589,45 +517,39 @@ declare <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);

-define <vscale x 2 x double> @intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64(
+define <vscale x 2 x double> @intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 2 x double> %0,
-  <vscale x 2 x double> %1,
-  <vscale x 2 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)

   ret <vscale x 2 x double> %a
 }

 declare <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64(
   <vscale x 4 x double>,
-  i64);
+  iXLen);

-define <vscale x 4 x double> @intrinsic_vfsqrt_v_nxv4f64_nxv4f64(
+define <vscale x 4 x double> @intrinsic_vfsqrt_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 4 x double> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64(
     <vscale x 4 x double> %0,
-    i64 %1)
+    iXLen %1)

   ret <vscale x 4 x double> %a
 }

@@ -636,45 +558,39 @@ declare <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);

-define <vscale x 4 x double> @intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64(
+define <vscale x 4 x double> @intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 4 x double> %0,
-  <vscale x 4 x double> %1,
-  <vscale x 4 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)

   ret <vscale x 4 x double> %a
 }

 declare <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64(
   <vscale x 8 x double>,
-  i64);
+  iXLen);

-define <vscale x 8 x double> @intrinsic_vfsqrt_v_nxv8f64_nxv8f64(
+define <vscale x 8 x double> @intrinsic_vfsqrt_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  <vscale x 8 x double> %0,
-  i64 %1) nounwind {
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64(
     <vscale x 8 x double> %0,
-    i64 %1)
+    iXLen %1)

   ret <vscale x 8 x double> %a
 }

@@ -683,25 +599,21 @@ declare <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);

-define <vscale x 8 x double> @intrinsic_vfsqrt_mask_v_nxv8f64_nxv8f64(
+define <vscale x 8 x double> @intrinsic_vfsqrt_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsqrt.v v8, v16, v0.t
 ; CHECK-NEXT:    ret
-  <vscale x 8 x double> %0,
-  <vscale x 8 x double> %1,
-  <vscale x 8 x i1> %2,
-  i64 %3) nounwind {
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64(
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)

   ret <vscale x 8 x double> %a
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll
deleted file mode 100644
index 7445cfb806d4..000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll
+++ /dev/null
@@ -1,1356 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -mattr=+zfh \
-; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfsub.vv v8,
v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv8f16.nxv8f16( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv8f16.nxv8f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsub.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv16f16.nxv16f16( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv16f16.nxv16f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsub.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv32f16.nxv32f16( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv32f16.nxv32f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv1f32.nxv1f32( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv1f32.nxv1f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32( - , - , - , - , - i64, - i64); - -define 
@intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv2f32.nxv2f32( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv2f32.nxv2f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv4f32.nxv4f32( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv4f32.nxv4f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsub.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv8f32.nxv8f32( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv8f32.nxv8f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsub.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv16f32.nxv16f32( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfsub.nxv16f32.nxv16f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv1f64.nxv1f64( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv1f64.nxv1f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv2f64.nxv2f64( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv2f64.nxv2f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsub.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv4f64.nxv4f64( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv4f64.nxv4f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsub.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv8f64.nxv8f64( - , - , - i64); - -define @intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfsub.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv8f64.nxv8f64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64( - , - , - , - , - i64, - i64); - -define @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv1f16.f16( - , - half, - i64); - -define @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv1f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv1f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv2f16.f16( - , - half, - i64); - -define @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv2f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv2f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv4f16.f16( - , - half, - i64); - -define @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv4f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv4f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a 
-} - -declare @llvm.riscv.vfsub.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv8f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv16f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv32f16.f16( - , - half, - i64); - -define @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv32f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv32f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv32f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv1f32.f32( - , - float, - i64); - -define @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: 
vfsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv4f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv4f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv8f32.f32( - , - float, - i64); - -define @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv8f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv8f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv16f32.f32( - , - float, - i64); - -define @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv16f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv16f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { 
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv16f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv1f64.f64( - , - double, - i64); - -define @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv1f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv1f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv1f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv2f64.f64( - , - double, - i64); - -define @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv2f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv2f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv2f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv4f64.f64( - , - double, - i64); - -define @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv4f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfsub.mask.nxv4f64.f64( - , - , - double, - , - i64, - i64); - -define @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.mask.nxv4f64.f64( - %0, - %1, - double %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfsub.nxv8f64.f64( - , - double, - i64); - -define @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfsub.nxv8f64.f64( - %0, - double %1, - i64 %2) - - ret %a -} - -declare 
@llvm.riscv.vfsub.mask.nxv8f64.f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  double,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsub.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    double %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfsub.ll
index 86371c1685fc..645fb340ffa3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);

-define <vscale x 1 x half> @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i32 %2)
+    iXLen %2)

   ret <vscale x 1 x half> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);

-define <vscale x 1 x half> @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)

   ret <vscale x 1 x half> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);

-define <vscale x 2 x half> @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i32 %2)
+    iXLen %2)

   ret <vscale x 2 x half> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);

-define <vscale x 2 x half> @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)

   ret <vscale x 2 x half> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);

-define <vscale x 4 x half> @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0,
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -106,7 +108,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv4f16.nxv4f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -116,10 +118,10 @@ declare @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfsub.nxv8f16.nxv8f16( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv8f16.nxv8f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfsub.nxv16f16.nxv16f16( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv16f16.nxv16f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -221,7 +223,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -229,9 +231,9 @@ entry: declare @llvm.riscv.vfsub.nxv32f16.nxv32f16( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -241,7 +243,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv32f16.nxv32f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -251,10 +253,10 @@ declare @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, 
%2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -267,7 +269,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -275,9 +277,9 @@ entry: declare @llvm.riscv.vfsub.nxv1f32.nxv1f32( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -287,7 +289,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv1f32.nxv1f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -297,10 +299,10 @@ declare @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -312,7 +314,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -320,9 +322,9 @@ entry: declare @llvm.riscv.vfsub.nxv2f32.nxv2f32( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -332,7 +334,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv2f32.nxv2f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -342,10 +344,10 @@ declare @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -357,7 +359,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -365,9 +367,9 @@ entry: declare @llvm.riscv.vfsub.nxv4f32.nxv4f32( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -377,7 +379,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv4f32.nxv4f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -387,10 +389,10 @@ declare @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -402,7 +404,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -410,9 +412,9 @@ entry: declare @llvm.riscv.vfsub.nxv8f32.nxv8f32( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32( 
%0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -422,7 +424,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv8f32.nxv8f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -432,10 +434,10 @@ declare @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -447,7 +449,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -455,9 +457,9 @@ entry: declare @llvm.riscv.vfsub.nxv16f32.nxv16f32( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -467,7 +469,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv16f32.nxv16f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -477,10 +479,10 @@ declare @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -493,7 +495,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -501,9 +503,9 @@ entry: declare @llvm.riscv.vfsub.nxv1f64.nxv1f64( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -513,7 +515,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv1f64.nxv1f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -523,10 +525,10 @@ declare @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -538,7 +540,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -546,9 +548,9 @@ entry: declare @llvm.riscv.vfsub.nxv2f64.nxv2f64( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -558,7 +560,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv2f64.nxv2f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -568,10 +570,10 @@ declare @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64( , , , - i32, - i32); + iXLen, + iXLen); -define 
@intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -583,7 +585,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -591,9 +593,9 @@ entry: declare @llvm.riscv.vfsub.nxv4f64.nxv4f64( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -603,7 +605,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv4f64.nxv4f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -613,10 +615,10 @@ declare @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -628,7 +630,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -636,9 +638,9 @@ entry: declare @llvm.riscv.vfsub.nxv8f64.nxv8f64( , , - i32); + iXLen); -define @intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i32 %2) nounwind { +define @intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -648,7 +650,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv8f64.nxv8f64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -658,10 +660,10 @@ declare @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -674,7 +676,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -682,9 +684,9 @@ entry: declare @llvm.riscv.vfsub.nxv1f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -694,7 +696,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv1f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -704,10 +706,10 @@ declare @llvm.riscv.vfsub.mask.nxv1f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -719,7 +721,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -727,9 +729,9 @@ entry: declare @llvm.riscv.vfsub.nxv2f16.f16( , half, - i32); + iXLen); 
-define @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -739,7 +741,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv2f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -749,10 +751,10 @@ declare @llvm.riscv.vfsub.mask.nxv2f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -764,7 +766,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -772,9 +774,9 @@ entry: declare @llvm.riscv.vfsub.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -784,7 +786,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -794,10 +796,10 @@ declare @llvm.riscv.vfsub.mask.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -809,7 +811,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -817,9 +819,9 @@ entry: declare @llvm.riscv.vfsub.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -829,7 +831,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -839,10 +841,10 @@ declare @llvm.riscv.vfsub.mask.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -854,7 +856,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -862,9 +864,9 @@ entry: declare @llvm.riscv.vfsub.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -874,7 +876,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -884,10 +886,10 @@ declare @llvm.riscv.vfsub.mask.nxv16f16.f16( 
, half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -899,7 +901,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -907,9 +909,9 @@ entry: declare @llvm.riscv.vfsub.nxv32f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -919,7 +921,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv32f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -929,10 +931,10 @@ declare @llvm.riscv.vfsub.mask.nxv32f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -944,7 +946,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -952,9 +954,9 @@ entry: declare @llvm.riscv.vfsub.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -964,7 +966,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -974,10 +976,10 @@ declare @llvm.riscv.vfsub.mask.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -989,7 +991,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -997,9 +999,9 @@ entry: declare @llvm.riscv.vfsub.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1009,7 +1011,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1019,10 +1021,10 @@ declare @llvm.riscv.vfsub.mask.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1034,7 +1036,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen 
%4, iXLen 1) ret %a } @@ -1042,9 +1044,9 @@ entry: declare @llvm.riscv.vfsub.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1054,7 +1056,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1064,10 +1066,10 @@ declare @llvm.riscv.vfsub.mask.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1079,7 +1081,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1087,9 +1089,9 @@ entry: declare @llvm.riscv.vfsub.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1099,7 +1101,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1109,10 +1111,10 @@ declare @llvm.riscv.vfsub.mask.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1124,7 +1126,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1132,9 +1134,9 @@ entry: declare @llvm.riscv.vfsub.nxv16f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1144,7 +1146,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv16f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -1154,10 +1156,10 @@ declare @llvm.riscv.vfsub.mask.nxv16f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1169,7 +1171,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1177,9 +1179,9 @@ entry: declare @llvm.riscv.vfsub.nxv1f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ 
-1189,7 +1191,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv1f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1199,10 +1201,10 @@ declare @llvm.riscv.vfsub.mask.nxv1f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -1214,7 +1216,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vfsub.nxv2f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv2f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1244,10 +1246,10 @@ declare @llvm.riscv.vfsub.mask.nxv2f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -1259,7 +1261,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1267,9 +1269,9 @@ entry: declare @llvm.riscv.vfsub.nxv4f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1279,7 +1281,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv4f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1289,10 +1291,10 @@ declare @llvm.riscv.vfsub.mask.nxv4f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -1304,7 +1306,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1312,9 +1314,9 @@ entry: declare @llvm.riscv.vfsub.nxv8f64.f64( , double, - i32); + iXLen); -define @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { +define @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1324,7 +1326,7 @@ entry: %a = call @llvm.riscv.vfsub.nxv8f64.f64( %0, double %1, - i32 %2) + iXLen %2) ret %a } @@ -1334,10 +1336,10 @@ declare @llvm.riscv.vfsub.mask.nxv8f64.f64( , double, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { +define @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -1349,7 +1351,7 @@ entry: %1, double %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll deleted file mode 100644 index a04b9a54b930..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll +++ /dev/null @@ -1,830 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( - , - , - i64); - -define @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16( - , - , - i64); - -define @intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16( - , - , - i64); - -define @intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.vv v10, v8, v9 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a 
= call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16( - , - , - i64); - -define @intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.vv v12, v8, v10 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16( - , - , - i64); - -define @intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.vv v16, v8, v12 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32( - , - , - i64); - -define @intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32( - , - , - i64); - -define @intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.vv v10, v8, v9 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32( - , - , - i64); - -define @intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.vv v12, v8, v10 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32( - , - , - i64); - -define @intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.vv v16, v8, v12 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16( - , - half, - i64); - -define @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.vf v9, v8, fa0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16( - %0, - %1, - half %2, - %3, - 
i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16( - , - half, - i64); - -define @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.vf v9, v8, fa0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16( - , - half, - i64); - -define @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.vf v10, v8, fa0 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.vf v12, v8, fa0 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.vf v16, v8, fa0 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16( - , 
- , - half, - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32( - , - float, - i64); - -define @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.vf v9, v8, fa0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.vf v10, v8, fa0 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.vf v12, v8, fa0 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32( - , - float, - i64); - -define 
@intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.vf v16, v8, fa0 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll similarity index 86% rename from llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwadd.ll index a3bdcc457328..541f2b8564f2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( , , - i32); + iXLen); -define @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -17,7 +19,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -27,10 +29,10 @@ declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -50,9 +52,9 @@ entry: declare @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16( , , - i32); + iXLen); -define @intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -63,7 +65,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -73,10 +75,10 @@ declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define 
@intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -96,9 +98,9 @@ entry: declare @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16( , , - i32); + iXLen); -define @intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -109,7 +111,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -119,10 +121,10 @@ declare @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -142,9 +144,9 @@ entry: declare @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16( , , - i32); + iXLen); -define @intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -155,7 +157,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -165,10 +167,10 @@ declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -188,9 +190,9 @@ entry: declare @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16( , , - i32); + iXLen); -define @intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -201,7 +203,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -211,10 +213,10 @@ declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16 , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -234,9 +236,9 @@ entry: declare 
@llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32( , , - i32); + iXLen); -define @intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -247,7 +249,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -257,10 +259,10 @@ declare @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -280,9 +282,9 @@ entry: declare @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32( , , - i32); + iXLen); -define @intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -293,7 +295,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -303,10 +305,10 @@ declare @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -326,9 +328,9 @@ entry: declare @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32( , , - i32); + iXLen); -define @intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -339,7 +341,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -349,10 +351,10 @@ declare @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -372,9 +374,9 @@ entry: declare @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32( , , - i32); + iXLen); -define @intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -385,7 +387,7 @@ 
entry: %a = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -395,10 +397,10 @@ declare @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -418,9 +420,9 @@ entry: declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -431,7 +433,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -441,10 +443,10 @@ declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -456,7 +458,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -464,9 +466,9 @@ entry: declare @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -477,7 +479,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -487,10 +489,10 @@ declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -502,7 +504,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -510,9 +512,9 @@ entry: declare @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -523,7 +525,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -533,10 +535,10 @@ declare @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -548,7 +550,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -556,9 +558,9 @@ entry: declare @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -569,7 +571,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -579,10 +581,10 @@ declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -594,7 +596,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -602,9 +604,9 @@ entry: declare @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -615,7 +617,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -625,10 +627,10 @@ declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -640,7 +642,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -648,9 +650,9 @@ entry: declare @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -661,7 +663,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -671,10 +673,10 @@ declare @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -686,7 +688,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -694,9 +696,9 @@ entry: declare @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32( , float, - i32); + 
iXLen); -define @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -707,7 +709,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -717,10 +719,10 @@ declare @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -732,7 +734,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -740,9 +742,9 @@ entry: declare @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -753,7 +755,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -763,10 +765,10 @@ declare @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -778,7 +780,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -786,9 +788,9 @@ entry: declare @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -799,7 +801,7 @@ entry: %a = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -809,10 +811,10 @@ declare @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -824,7 +826,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll deleted file mode 100644 index 3586ec64e9df..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll +++ /dev/null @@ -1,1248 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare 
@llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( - , - , - i64); - -define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16( - , - , - i64); - -define @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16( - , - , - i64); - -define @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16( - , - , - i64); - -define @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16: -; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16( - , - , - i64); - -define @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl4re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32( - , - , - i64); - -define @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32( - , - , - i64); - -define @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32( - , - , - i64); - -define @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32( - %0, - %1, - i64 
%2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32( - , - , - i64); - -define @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl4re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv1f32.f16( - , - half, - i64); - -define @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv1f32.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv2f32.f16( - , - half, - i64); - -define @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv2f32.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv4f32.f16( - , - half, - i64); - -define @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, i64 %2) nounwind { 
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv4f32.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv8f32.f16( - , - half, - i64); - -define @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv8f32.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv16f32.f16( - , - half, - i64); - -define @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv16f32.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv1f64.f32( - , - float, - i64); - -define @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv1f64.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv1f64.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfwadd.w.mask.nxv1f64.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv2f64.f32( - , - float, - i64); - -define @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv2f64.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv2f64.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv4f64.f32( - , - float, - i64); - -define @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv4f64.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv4f64.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.nxv8f64.f32( - , - float, - i64); - -define @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.nxv8f64.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwadd.w.mask.nxv8f64.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16( - %0, - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16( - %0, - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16( - %0, - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16( - %0, - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16( - %0, - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32( - %0, - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32( - %0, - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32( - %0, - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.wv v8, v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32( - %0, - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t -; CHECK-NEXT: 
ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( - %0, - %0, - half %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( - %0, - %0, - half %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( - %0, - %0, - half %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( - %0, - %0, - half %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( - %0, - %0, - half %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32( - %0, - %0, - float %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32( - %0, - %0, - float %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32( - %0, - %0, - float %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32( - %0, - %0, - float %1, - %2, - i64 %3, i64 1) - - ret %a -} - -define 
@intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwadd.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
-    <vscale x 1 x float> %1,
-    <vscale x 1 x half> %0,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %1,
-    <vscale x 2 x half> %0,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwadd.wv v12, v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
-    <vscale x 4 x float> %1,
-    <vscale x 4 x half> %0,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwadd.wv v16, v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16(
-    <vscale x 8 x float> %1,
-    <vscale x 8 x half> %0,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-define <vscale x 1 x double> @intrinsic_vfwadd.w_wv_untie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv1f64_nxv1f64_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwadd.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %1,
-    <vscale x 1 x float> %0,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-define <vscale x 2 x double> @intrinsic_vfwadd.w_wv_untie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv2f64_nxv2f64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwadd.wv v12, v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32(
-    <vscale x 2 x double> %1,
-    <vscale x 2 x float> %0,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-define <vscale x 4 x double> @intrinsic_vfwadd.w_wv_untie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv4f64_nxv4f64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwadd.wv v16, v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32(
-    <vscale x 4 x double> %1,
-    <vscale x 4 x float> %0,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-define <vscale x 8 x double> @intrinsic_vfwadd.w_wv_untie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x double> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv8f64_nxv8f64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwadd.wv v24, v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32(
-    <vscale x 8 x double> %1,
-    <vscale x 8 x float> %0,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll
similarity index 88%
rename from llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll
index 3d046d2ba805..28cdfbf621b3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -116,10 +118,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL:
intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -131,7 +133,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -139,9 +141,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16( , , - i32); + iXLen); -define @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -151,7 +153,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -161,10 +163,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -176,7 +178,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16( , , - i32); + iXLen); -define @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -206,10 +208,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re16.v v24, (a0) @@ -222,7 +224,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -230,9 +232,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32( , , - i32); + iXLen); -define @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -242,7 +244,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -252,10 +254,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -267,7 +269,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -275,9 +277,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32( , , - i32); + iXLen); -define @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, i32 %2) nounwind { 
+define @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -287,7 +289,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -297,10 +299,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -312,7 +314,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -320,9 +322,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32( , , - i32); + iXLen); -define @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -332,7 +334,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -342,10 +344,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -357,7 +359,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -365,9 +367,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32( , , - i32); + iXLen); -define @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -377,7 +379,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -387,10 +389,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re32.v v24, (a0) @@ -403,7 +405,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -411,9 +413,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv1f32.f16( , half, - i32); + iXLen); -define @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -423,7 +425,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv1f32.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -433,10 +435,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( , half, , - i32, 
- i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -448,7 +450,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -456,9 +458,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv2f32.f16( , half, - i32); + iXLen); -define @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -468,7 +470,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv2f32.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -478,10 +480,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -493,7 +495,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -501,9 +503,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv4f32.f16( , half, - i32); + iXLen); -define @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -513,7 +515,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv4f32.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -523,10 +525,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -538,7 +540,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -546,9 +548,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv8f32.f16( , half, - i32); + iXLen); -define @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -558,7 +560,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv8f32.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -568,10 +570,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -583,7 +585,7 @@ entry: %1, half 
%2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -591,9 +593,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv16f32.f16( , half, - i32); + iXLen); -define @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -603,7 +605,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv16f32.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -613,10 +615,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -628,7 +630,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -636,9 +638,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv1f64.f32( , float, - i32); + iXLen); -define @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -648,7 +650,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv1f64.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -658,10 +660,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv1f64.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -673,7 +675,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -681,9 +683,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv2f64.f32( , float, - i32); + iXLen); -define @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -693,7 +695,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv2f64.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -703,10 +705,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv2f64.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -718,7 +720,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -726,9 +728,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv4f64.f32( , float, - i32); + iXLen); -define @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -738,7 +740,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv4f64.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -748,10 +750,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv4f64.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -763,7 +765,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -771,9 +773,9 @@ entry: declare @llvm.riscv.vfwadd.w.nxv8f64.f32( , float, - i32); + iXLen); -define @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -783,7 +785,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv8f64.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -793,10 +795,10 @@ declare @llvm.riscv.vfwadd.w.mask.nxv8f64.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -808,12 +810,12 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -825,12 +827,12 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -842,12 +844,12 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -859,12 +861,12 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -876,12 +878,12 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } 
-define @intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -893,12 +895,12 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -910,12 +912,12 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -927,12 +929,12 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -944,12 +946,12 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -961,12 +963,12 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -978,12 +980,12 @@ entry: %0, half %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -995,12 +997,12 @@ entry: %0, half %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -1012,12 +1014,12 @@ entry: %0, half %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16( %0, half %1, %2, i32 %3) nounwind { +define 
@intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -1029,12 +1031,12 @@ entry: %0, half %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -1046,12 +1048,12 @@ entry: %0, half %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -1063,12 +1065,12 @@ entry: %0, float %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1080,12 +1082,12 @@ entry: %0, float %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1097,12 +1099,12 @@ entry: %0, float %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1114,12 +1116,12 @@ entry: %0, float %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -1130,12 +1132,12 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( %1, %0, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -1146,12 +1148,12 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16( %1, %0, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4f16( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -1162,12 +1164,12 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16( %1, %0, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -1178,12 +1180,12 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16( %1, %0, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vfwadd.w_wv_untie_nxv1f64_nxv1f64_nxv1f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_untie_nxv1f64_nxv1f64_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -1194,12 +1196,12 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32( %1, %0, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vfwadd.w_wv_untie_nxv2f64_nxv2f64_nxv2f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_untie_nxv2f64_nxv2f64_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -1210,12 +1212,12 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32( %1, %0, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vfwadd.w_wv_untie_nxv4f64_nxv4f64_nxv4f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_untie_nxv4f64_nxv4f64_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -1226,12 +1228,12 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32( %1, %0, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vfwadd.w_wv_untie_nxv8f64_nxv8f64_nxv8f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwadd.w_wv_untie_nxv8f64_nxv8f64_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -1242,7 +1244,7 @@ entry: %a = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32( %1, %0, - i32 %2) + iXLen %2) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll deleted file mode 100644 index e050090b7761..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll +++ /dev/null @@ -1,380 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16( - , - i64); - -define @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16( - , - i64); - -define @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16( - , - i64); - -define @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16( - , - i64); - -define @intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16( - , - i64); - -define @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16( - %0, - i64 %1) - 
- ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32( - , - i64); - -define @intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32( - , - i64); - -define @intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32( - , - i64); - -define @intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32( - , - i64); - -define @intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32(
-    <vscale x 8 x float> %0,
-    i64 %1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
index 460488388b5e..386fc9a4822a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x float> @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16(<vscale x 1
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
     <vscale x 1 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x float> %a
 }
@@ -24,10 +26,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -38,16 +40,16 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
 
 declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16(
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x float> @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16(<vscale x 2
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16(
     <vscale x 2 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x float> %a
 }
@@ -66,10 +68,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -80,16 +82,16 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
 
 declare
@llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16( @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16( @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16( @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32( @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare 
@llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32( @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32( @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32( @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -374,7 +376,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll 
b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll deleted file mode 100644 index e294cbe085f7..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll +++ /dev/null @@ -1,632 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16( %0, i64 %1) nounwind { -; CHECK-LABEL: 
intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32( - , - i64); - -define @intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.x.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll
index 467b64c20fed..ad4a3a4a5eb6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8(<vscale x 1
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
     <vscale x 1 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x half> %a
 }
@@ -24,10 +26,10 @@ declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
   <vscale x 1 x half>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -38,16 +40,16 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
 
 declare <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8(
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x half> @intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x half> @intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8(<vscale x 2
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8(
     <vscale x 2 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x half> %a
 }
@@ -66,10 +68,10 @@ declare <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8(
   <vscale x 2 x half>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -80,16 +82,16 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
 
 declare <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8(
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x half> @intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -99,7 +101,7 @@ define <vscale x 4 x half> @intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8(
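; ---------------------------------------------------------------------------
; Illustration (not part of the patch): the merged file is not valid IR as
; written, because iXLen is a placeholder rather than a real type. Each RUN
; line above instantiates it with sed before llc ever parses the file; the
; rv64 line effectively feeds llc a declaration such as
;
;   declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
;     <vscale x 1 x i8>,
;     i64);
;
; while the rv32 line produces the same declaration with i32. Both targets
; then generate identical vector code, so one CHECK block covers both.
; ---------------------------------------------------------------------------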
@llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8( @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8( @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8( @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - 
i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16( @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16( @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16( @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -374,16 +376,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -393,7 +395,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16( @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16( %0, - i32 %1) + iXLen %1) ret %a } @@ 
-402,10 +404,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -416,16 +418,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -435,7 +437,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16( @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -444,10 +446,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -458,16 +460,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -477,7 +479,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32( @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -486,10 +488,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -500,16 +502,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -519,7 +521,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32( @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -528,10 +530,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -542,16 +544,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare 
@llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -561,7 +563,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32( @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -570,10 +572,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -584,16 +586,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -603,7 +605,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32( @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -612,10 +614,10 @@ declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -626,7 +628,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll deleted file mode 100644 index 107813b7879b..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll +++ /dev/null @@ -1,632 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8( %0, i64 %1) nounwind { -; CHECK-LABEL: 
intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16( %0, %1, %2, 
i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32( - , - i64); - -define @intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret 
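; ---------------------------------------------------------------------------
; Annotated sketch (not part of the patch): every masked test in these files
; passes the same five operands; the names below are illustrative stand-ins
; for the numbered arguments, and the reading of the final constant is
; inferred from the "ta, mu" vsetvli in the generated code:
;
;   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32(
;       <vscale x 2 x double> %passthru, ; result lanes kept where mask is off
;       <vscale x 2 x i32> %src,         ; unsigned source to widen
;       <vscale x 2 x i1> %mask,         ; mask, appears as v0.t in the asm
;       i64 %vl,                         ; AVL, handed to vsetvli via a0
;       i64 1)                           ; policy operand; 1 matches the "ta"
;                                        ; (tail agnostic) in the vsetvli
; ---------------------------------------------------------------------------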
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32(
-    <vscale x 2 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32(
-  <vscale x 2 x double>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32(
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32(
-    <vscale x 4 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32(
-  <vscale x 4 x double>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32(
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32(
-    <vscale x 8 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32(
-  <vscale x 8 x double>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll
index fc0af066c39e..9eef34d4de1e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ;
CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -15,7 +17,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8( @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -24,10 +26,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -38,16 +40,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -57,7 +59,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8( @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -66,10 +68,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -80,16 +82,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8( @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8( @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8( %0, %1, %2, iXLen %3) nounwind 
{ ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8( @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8( @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16( @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16( @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16( @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -374,16 +376,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -393,7 +395,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16( @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -402,10 +404,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -416,16 +418,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -435,7 +437,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16( @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -444,10 +446,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16( %0, %1, %2, i32 %3) nounwind { +define 
@intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -458,16 +460,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -477,7 +479,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32( @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -486,10 +488,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -500,16 +502,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -519,7 +521,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32( @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -528,10 +530,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -542,16 +544,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -561,7 +563,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32( @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -570,10 +572,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -584,16 +586,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32( , - i32); + iXLen); -define @intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32( %0, iXLen 
%1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -603,7 +605,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32( @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -612,10 +614,10 @@ declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -626,7 +628,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll deleted file mode 100644 index 4d551f62ec52..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll +++ /dev/null @@ -1,380 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16( - , - i64); - -define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16( - , - i64); - -define @intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16( - , - i64); - -define @intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 
-; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16( - , - i64); - -define @intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16( - , - i64); - -define @intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32( - , - i64); - -define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) 
-
-  ret %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32(
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32(
-    <vscale x 2 x float> %0,
-    i64 %1)
-
-  ret %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32(<vscale x 2 x i64> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32(
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32(
-    <vscale x 4 x float> %0,
-    i64 %1)
-
-  ret %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32(
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32(
-    <vscale x 8 x float> %0,
-    i64 %1)
-
-  ret %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll
index d6cf1b356310..0f7a46aadfd1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret %a
 }
@@ -24,10 +26,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16(
   <vscale x 1 x i32>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -38,16 +40,16 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret %a
 }
 
 declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16(
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16(
     <vscale x 2 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret %a
 }
@@ -66,10 +68,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16(
   <vscale x 2 x i32>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -80,16 +82,16 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret %a
 }
 
 declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16(
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -99,7 +101,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16(
     <vscale x 4 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret %a
 }
@@ -108,10 +110,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16(
   <vscale x 4 x i32>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16(<vscale x 4 x i32> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16(<vscale x 4 x i32> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -122,16 +124,16 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret %a
 }
 
 declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16(
   <vscale x 8 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
+define <vscale x 8 x i32>
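All of the merged tests in this patch rely on the same trick the new RUN lines introduce: `iXLen` is not an IR type at all, only a placeholder token, and sed rewrites it to i32 or i64 before llc ever parses the file, so a single test body now covers both XLen targets. A minimal self-contained sketch of the pattern follows; it reuses an intrinsic from this patch, but the function name @example and its argument names are hypothetical, and the sketch is illustrative rather than part of the patch:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

; The vector-length operand of an RVV intrinsic is XLen wide; it was the only
; difference between the old -rv32.ll and -rv64.ll copies of these tests.
declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i32> @example(<vscale x 1 x half> %v, iXLen %vl) nounwind {
; CHECK-LABEL: example:
; CHECK: vfwcvt.rtz.x.f.v
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
    <vscale x 1 x half> %v,
    iXLen %vl)
  ret <vscale x 1 x i32> %a
}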
@intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16( @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16( @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32( @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32( @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare 
@llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32( @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32( @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -374,7 +376,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll deleted file mode 100644 index c419e08471ca..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll +++ /dev/null @@ -1,380 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16( - , - i32); - -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16( - %0, - i32 %1) - - ret %a -} - -declare 
@llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16( - , - , - , - i32, - i32); - -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16( - , - i32); - -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16( - , - , - , - i32, - i32); - -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16( - , - i32); - -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16( - , - , - , - i32, - i32); - -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16( - , - i32); - -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16( - , - , - , - i32, - i32); - -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16( - , - i32); - -define 
@intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16( - , - , - , - i32, - i32); - -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32( - , - i32); - -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32( - , - , - , - i32, - i32); - -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32( - , - i32); - -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32( - , - , - , - i32, - i32); - -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32( - , - i32); - -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32( - , - , - , - i32, - i32); - -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32( %0, %1, %2, i32 %3) nounwind { -; 
CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32( - , - i32); - -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32( - %0, - i32 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32( - , - , - , - i32, - i32); - -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32( - %0, - %1, - %2, - i32 %3, i32 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll similarity index 86% rename from llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll index 6b881aba8f65..f3d786a37fbe 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16( , - i64); + iXLen); -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16( %0, i64 %1) nounwind { +define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -15,7 +17,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16( @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -24,10 +26,10 @@ declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -38,16 +40,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16( , - i64); + iXLen); -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16( %0, i64 %1) nounwind { +define @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -57,7 +59,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16( @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -66,10 +68,10 @@ declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -80,16 +82,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16( , - i64); + iXLen); -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16( %0, i64 %1) nounwind { +define @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16( @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16( , - i64); + iXLen); -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16( %0, i64 %1) nounwind { +define @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16( @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16( , - i64); + iXLen); -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16( %0, i64 %1) nounwind { +define @intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16( @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16( %0, - i64 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16 , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16( %0, 
%1, %2, i64 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32( , - i64); + iXLen); -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32( %0, i64 %1) nounwind { +define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32( @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32( , - i64); + iXLen); -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32( %0, i64 %1) nounwind { +define @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32( @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32( , - i64); + iXLen); -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32( %0, i64 %1) nounwind { +define @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32( @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } declare 
@llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32( , - i64); + iXLen); -define @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32( %0, i64 %1) nounwind { +define @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32( @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32( %0, - i64 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -374,7 +376,7 @@ entry: %0, %1, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll deleted file mode 100644 index fd01c64df0d3..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll +++ /dev/null @@ -1,380 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16( - , - i64); - -define @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16( - , - i64); - -define @intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16( - , - i64); - -define @intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16( - , - i64); - -define @intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16( - , - i64); - -define @intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32( - , - i64); - -define @intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a 
-} - -declare @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32( - , - i64); - -define @intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32( - , - i64); - -define @intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32( - , - i64); - -define @intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwcvt.x.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll similarity index 87% rename from llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll index b8d88e1c64e5..6c3c2d7702eb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | 
llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -15,7 +17,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16( @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -24,10 +26,10 @@ declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -38,16 +40,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -57,7 +59,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16( @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -66,10 +68,10 @@ declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -80,16 +82,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16( @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16( 
@llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16( @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32( @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32( @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -290,16 +292,16 @@ 
entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32( @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32( @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -374,7 +376,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll deleted file mode 100644 index dc461d60b0be..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll +++ /dev/null @@ -1,380 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16( - , - i64); - -define @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16( - , - i64); - -define 
@intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16( - , - i64); - -define @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16( - , - i64); - -define @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16( - , - i64); - -define @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; 
CHECK-NEXT: vfwcvt.xu.f.v v8, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32( - , - i64); - -define @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32( - , - i64); - -define @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v8, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32( - , - i64); - -define @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32( - , - , - , - i64, - i64); - -define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v8, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32( - %0, - %1, - %2, - i64 %3, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32( - , - i64); - -define @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwcvt.xu.f.v v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32( - , - , - , - i64, - 
i64);
-
-define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll
index c2b0a222709e..10bd22304ed8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16(<vscale x 1
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -24,10 +26,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
   <vscale x 1 x i32>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -38,16 +40,16 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
 declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16(
   <vscale x 2 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16(<vscale x 2
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16(
     <vscale x 2 x half> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -66,10 +68,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16(
   <vscale x 2 x i32>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -80,16 +82,16 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
 declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16(
   <vscale x 4 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16:
 ;
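; NOTE (editor's illustration, not part of the original patch): the RUN-line
; hunk above is the substantive change in this commit; the rest of each file's
; diff is the mechanical i32/i64 -> iXLen rewrite. The merged test is written
; against the placeholder type iXLen, and each RUN line specializes it with
; sed before llc ever parses the IR, so one test body serves both targets.
; Roughly:
;
;   sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 ...   # VL operand becomes i32
;   sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 ...   # VL operand becomes i64
;
; A single CHECK prefix can serve both expansions because the generated vector
; code is XLen-independent here: the VL argument lands in a0 on both targets,
; and the ilp32d/lp64d ABIs keep scalar FP operands in fa0 either way, so the
; autogenerated assertions match the output of both RUN lines.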
CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -99,7 +101,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16( @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -108,10 +110,10 @@ declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -122,16 +124,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -141,7 +143,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16( @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -150,10 +152,10 @@ declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -164,16 +166,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16( , - i32); + iXLen); -define @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -183,7 +185,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16( @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16( %0, - i32 %1) + iXLen %1) ret %a } @@ -192,10 +194,10 @@ declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -206,16 +208,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -225,7 +227,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32( @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -234,10 +236,10 @@ declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32( %0, %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -248,16 +250,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -267,7 +269,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32( @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -276,10 +278,10 @@ declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -290,16 +292,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -309,7 +311,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32( @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -318,10 +320,10 @@ declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -332,16 +334,16 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } declare @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32( , - i32); + iXLen); -define @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32( %0, i32 %1) nounwind { +define @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -351,7 +353,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32( @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32( %0, - i32 %1) + iXLen %1) ret %a } @@ -360,10 +362,10 @@ declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -374,7 +376,7 @@ entry: %0, %1, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll deleted file mode 100644 index eb21c54c18e9..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll +++ /dev/null @@ -1,830 +0,0 @@ -; NOTE: Assertions have 
been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfwmacc.nxv1f32.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv1f32.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv2f32.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv2f32.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv4f32.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v10, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv4f32.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv8f32.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v12, v14 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv8f32.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16( - , - , - , - , - i64); - -define 
@intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv16f32.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v16, v20 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv16f32.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv1f64.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv2f64.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v10, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv4f64.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; 
CHECK-NEXT: vfwmacc.vv v8, v12, v14 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv8f64.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v16, v20 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwmacc.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv1f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv1f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv1f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv1f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv2f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv2f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv2f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv2f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv4f32.f16( - , - half, - , - i64); - -define 
@intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv4f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv4f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv4f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv8f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv8f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv8f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv8f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv16f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv16f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv16f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv16f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv1f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv1f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv1f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, 
fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv2f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv2f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv2f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv2f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv4f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv4f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv4f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmacc.nxv8f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.nxv8f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmacc.mask.nxv8f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll similarity index 89% rename from llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll index 0de121cb3f00..f5db61b5e8c7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck 
%s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwmacc.nxv1f32.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfwmacc.nxv2f32.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfwmacc.nxv4f32.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfwmacc.nxv8f32.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 
@@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfwmacc.nxv16f32.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfwmacc.nxv1f64.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfwmacc.nxv2f64.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfwmacc.nxv4f64.nxv4f32( , , , - i32); + 
iXLen); -define @intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfwmacc.nxv8f64.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfwmacc.nxv1f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -432,7 +434,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv1f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -456,7 +458,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfwmacc.nxv2f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -478,7 +480,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv2f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half 
%1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -502,7 +504,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfwmacc.nxv4f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -524,7 +526,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv4f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -548,7 +550,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfwmacc.nxv8f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv8f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfwmacc.nxv16f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -616,7 +618,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv16f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfwmacc.nxv1f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a0, e32, mf2, tu, mu @@ -662,7 +664,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv1f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -686,7 +688,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfwmacc.nxv2f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -708,7 +710,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv2f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -732,7 +734,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfwmacc.nxv4f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -754,7 +756,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv4f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -778,7 +780,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vfwmacc.nxv8f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfwmacc.mask.nxv8f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff 
--git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll deleted file mode 100644 index b2e1e235e969..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll +++ /dev/null @@ -1,830 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfwmsac.nxv1f32.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv1f32.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv2f32.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv2f32.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv4f32.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v10, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv4f32.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv8f32.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, 
tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v12, v14 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv8f32.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv16f32.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v16, v20 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv16f32.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv1f64.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv2f64.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v10, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare 
@llvm.riscv.vfwmsac.nxv4f64.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v12, v14 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv8f64.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v16, v20 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwmsac.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv1f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv1f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv1f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv1f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv2f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv2f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv2f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv2f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv4f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv4f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv4f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv4f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv8f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv8f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv8f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv8f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv16f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv16f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv16f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv16f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv1f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv1f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv1f64.f32( - , - 
float, - , - , - i64); - -define @intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv2f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv2f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv2f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv2f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv4f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv4f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv4f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwmsac.nxv8f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.nxv8f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwmsac.mask.nxv8f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmsac.mask.nxv8f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll similarity index 89% rename from llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll index 82c4fad996e7..884ee36575b4 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwmsac.nxv1f32.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfwmsac.nxv2f32.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfwmsac.nxv4f32.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfwmsac.nxv8f32.nxv8f16( , , , - i32); + iXLen); -define 
@intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfwmsac.nxv16f32.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfwmsac.nxv1f64.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfwmsac.nxv2f64.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfwmsac.nxv4f64.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfwmsac.nxv8f64.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfwmsac.nxv1f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -432,7 +434,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv1f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -456,7 +458,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfwmsac.nxv2f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -478,7 
+480,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv2f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -502,7 +504,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfwmsac.nxv4f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -524,7 +526,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv4f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -548,7 +550,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfwmsac.nxv8f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv8f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfwmsac.nxv16f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -616,7 +618,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv16f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare 
@llvm.riscv.vfwmsac.nxv1f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -662,7 +664,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv1f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -686,7 +688,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfwmsac.nxv2f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -708,7 +710,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv2f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -732,7 +734,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfwmsac.nxv4f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -754,7 +756,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv4f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -778,7 +780,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vfwmsac.nxv8f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfwmsac.mask.nxv8f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, 
%2, %3, i32 %4) nounwind { +define @intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll deleted file mode 100644 index 670c79975a2e..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll +++ /dev/null @@ -1,830 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s -declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( - , - , - i32); - -define @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwmul.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16( - , - , - i32); - -define @intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwmul.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16( - , - , - i32); - -define @intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwmul.vv v10, v8, v9 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwmul.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16( - , - , - i32); - -define @intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwmul.vv v12, v8, v10 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwmul.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16( - , - , - i32); - -define @intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwmul.vv v16, v8, v12 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwmul.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32( - , - , - i32); - -define @intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwmul.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32( - , - , - i32); - -define @intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwmul.vv v10, v8, 
v9 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwmul.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32( - , - , - i32); - -define @intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwmul.vv v12, v8, v10 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwmul.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32( - , - , - i32); - -define @intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwmul.vv v16, v8, v12 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwmul.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16( - , - half, - i32); - -define @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwmul.vf v9, v8, fa0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16( - , - half, - i32); - -define @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwmul.vf v9, v8, fa0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16( - , - half, - i32); - -define @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwmul.vf v10, v8, fa0 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16( - , - half, - i32); - -define @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwmul.vf v12, v8, fa0 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16( - , - half, - i32); - -define @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwmul.vf v16, v8, fa0 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16( - %0, - half %1, - i32 %2) 
- - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32( - , - float, - i32); - -define @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwmul.vf v9, v8, fa0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32( - , - float, - i32); - -define @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwmul.vf v10, v8, fa0 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32( - , - float, - i32); - -define @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwmul.vf v12, v8, fa0 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare 
@llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32( - , - float, - i32); - -define @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32( %0, float %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwmul.vf v16, v8, fa0 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32( - %0, - float %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32( - , - , - float, - , - i32, - i32); - -define @intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32( - %0, - %1, - float %2, - %3, - i32 %4, i32 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll similarity index 86% rename from llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwmul.ll index fc7d8dcb59e3..b1ec8464047e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( , , - i64); + iXLen); -define @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -17,7 +19,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -27,10 +29,10 @@ declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -50,9 +52,9 @@ entry: declare @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16( , , - i64); + iXLen); -define @intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -63,7 +65,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -73,10 +75,10 @@ declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, 
%3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -96,9 +98,9 @@ entry: declare @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16( , , - i64); + iXLen); -define @intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -109,7 +111,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -119,10 +121,10 @@ declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -142,9 +144,9 @@ entry: declare @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16( , , - i64); + iXLen); -define @intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -155,7 +157,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -165,10 +167,10 @@ declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -188,9 +190,9 @@ entry: declare @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16( , , - i64); + iXLen); -define @intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -201,7 +203,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -211,10 +213,10 @@ declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16 , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -234,9 +236,9 @@ 
entry: declare @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32( , , - i64); + iXLen); -define @intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -247,7 +249,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -257,10 +259,10 @@ declare @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -280,9 +282,9 @@ entry: declare @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32( , , - i64); + iXLen); -define @intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -293,7 +295,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -303,10 +305,10 @@ declare @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -326,9 +328,9 @@ entry: declare @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32( , , - i64); + iXLen); -define @intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -339,7 +341,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -349,10 +351,10 @@ declare @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -372,9 +374,9 @@ entry: declare @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32( , , - i64); + iXLen); -define @intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ 
-385,7 +387,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -395,10 +397,10 @@ declare @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -418,9 +420,9 @@ entry: declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -431,7 +433,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -441,10 +443,10 @@ declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -456,7 +458,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -464,9 +466,9 @@ entry: declare @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -477,7 +479,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -487,10 +489,10 @@ declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -502,7 +504,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -510,9 +512,9 @@ entry: declare @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -523,7 +525,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -533,10 +535,10 @@ declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define 
@intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -548,7 +550,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -556,9 +558,9 @@ entry: declare @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -569,7 +571,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -579,10 +581,10 @@ declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -594,7 +596,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -602,9 +604,9 @@ entry: declare @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16( , half, - i64); + iXLen); -define @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16( %0, half %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -615,7 +617,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16( %0, half %1, - i64 %2) + iXLen %2) ret %a } @@ -625,10 +627,10 @@ declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16( , half, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -640,7 +642,7 @@ entry: %1, half %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -648,9 +650,9 @@ entry: declare @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -661,7 +663,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -671,10 +673,10 @@ declare @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -686,7 +688,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -694,9 +696,9 @@ entry: declare 
@llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -707,7 +709,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -717,10 +719,10 @@ declare @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -732,7 +734,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -740,9 +742,9 @@ entry: declare @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -753,7 +755,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -763,10 +765,10 @@ declare @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -778,7 +780,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -786,9 +788,9 @@ entry: declare @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32( , float, - i64); + iXLen); -define @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32( %0, float %1, i64 %2) nounwind { +define @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -799,7 +801,7 @@ entry: %a = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32( %0, float %1, - i64 %2) + iXLen %2) ret %a } @@ -809,10 +811,10 @@ declare @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32( , float, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +define @intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -824,7 +826,7 @@ entry: %1, float %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll deleted file mode 100644 index ff2b40cfac2c..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll +++ /dev/null @@ -1,830 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs 
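;
; Everything from the "deleted file mode" header above down to the next diff
; header removes vfwnmacc-rv64.ll outright: each of its tests differed from
; the rv32 copy only in spelling the vector-length argument as i64 rather
; than i32. Its coverage now comes from the second RUN line of the merged
; vfwnmacc.ll further below; roughly (a sketch of the equivalence, paths
; abbreviated):
;
;   llc -mtriple=riscv64 -mattr=+v,+d,+zfh -target-abi=lp64d < vfwnmacc-rv64.ll
;     ~  sed 's/iXLen/i64/g' vfwnmacc.ll | llc -mtriple=riscv64 -mattr=+v,+zfh -target-abi=lp64d
;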
\ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v10, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v12, v14 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; 
CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v16, v20 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v10, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v12, v14 -; CHECK-NEXT: 
ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v16, v20 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv1f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv1f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv1f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv1f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv2f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv2f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv2f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv2f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv4f32.f16( - , - half, - , - i64); - -define 
@intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv4f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv4f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv4f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv8f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv8f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv8f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv8f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv16f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv16f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv16f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv16f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv1f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv1f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv1f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, 
tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv2f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv2f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv2f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv4f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv4f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv4f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv4f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.nxv8f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.nxv8f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmacc.mask.nxv8f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmacc.mask.nxv8f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll similarity index 89% rename from llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll index 02842609f568..4ccd0f8c5583 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 
-mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);

-define <vscale x 1 x float> @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    i32 %3)
+    iXLen %3)

   ret <vscale x 1 x float> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);

-define <vscale x 1 x float> @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)

   ret <vscale x 1 x float> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  i32);
+  iXLen);

-define <vscale x 2 x float> @intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
-    i32 %3)
+    iXLen %3)

   ret <vscale x 2 x float> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);

-define <vscale x 2 x float> @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)

   ret <vscale x 2 x float> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  i32);
+  iXLen);

-define <vscale x 4 x float> @intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
-    i32 %3)
+    iXLen %3)

   ret <vscale x 4 x float> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);

-define <vscale x 4 x float> @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)

   ret <vscale x 4 x float> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  i32);
+  iXLen);

-define <vscale x 8 x float> @intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL:
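;
; The sed-based RUN lines at the top of this merged file are the heart of
; the change: the checked-in test is not valid llc input by itself, because
; iXLen is a placeholder rather than a real IR type, so each RUN line first
; rewrites the placeholder to the concrete XLen integer type for one target.
; Once lit substitutes %s, the rv64 RUN line expands to roughly this
; pipeline (a sketch with the path shortened, not lit's literal command):
;
;   sed 's/iXLen/i64/g' vfwnmacc.ll \
;     | llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs -target-abi=lp64d \
;     | FileCheck vfwnmacc.ll
;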
intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, 
e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfwnmacc.nxv1f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -432,7 +434,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv1f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -456,7 +458,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfwnmacc.nxv2f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -478,7 +480,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare 
@llvm.riscv.vfwnmacc.mask.nxv2f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -502,7 +504,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfwnmacc.nxv4f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -524,7 +526,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv4f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -548,7 +550,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfwnmacc.nxv8f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv8f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfwnmacc.nxv16f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -616,7 +618,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv16f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfwnmacc.nxv1f64.f32( , float, , - i32); + iXLen); -define 
@intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -662,7 +664,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv1f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -686,7 +688,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfwnmacc.nxv2f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -708,7 +710,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv2f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -732,7 +734,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfwnmacc.nxv4f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -754,7 +756,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv4f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -778,7 +780,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vfwnmacc.nxv8f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfwnmacc.mask.nxv8f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { +define 
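;
; Worth noting in the vfwnmacc CHECK lines here: the generated vsetvli asks
; for tu (tail undisturbed) where the vfwmul tests earlier use ta, since a
; widening multiply-accumulate reads and updates its accumulator register,
; for example:
;
;   ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
;   ; CHECK-NEXT:    vfwnmacc.vf v8, fa0, v9, v0.t
;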
@intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll deleted file mode 100644 index 2fe370bb1d82..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll +++ /dev/null @@ -1,830 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16( - , - , - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16( - , - , - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v10, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16( - , - , - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v12, v14 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16( - , - , - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v16, v20 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16( - , - , - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32( - , - , - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v10, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32( - , - , - , - , - i64); - -define 
@intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v12, v14 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32( - , - , - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v16, v20 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32( - , - , - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv1f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv1f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv1f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv1f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv2f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, 
mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv2f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv2f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv2f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv4f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv4f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv4f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv4f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv8f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv8f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv8f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv8f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv16f32.f16( - , - half, - , - i64); - -define @intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv16f32.f16( - %0, - half %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv16f32.f16( - , - half, - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv16f32.f16( - %0, - half %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare 
@llvm.riscv.vfwnmsac.nxv1f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv1f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv1f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv2f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv2f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv2f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv2f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv4f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv4f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv4f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv4f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.nxv8f64.f32( - , - float, - , - i64); - -define @intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.nxv8f64.f32( - %0, - float %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwnmsac.mask.nxv8f64.f32( - , - float, - , - , - i64); - -define @intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwnmsac.mask.nxv8f64.f32( - %0, - float %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll similarity index 89% rename from llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll index fe9683ed15ad..26fcb06d8916 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 
+122,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32( , , , - i32); + iXLen); -define 
@intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfwnmsac.nxv1f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -432,7 +434,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv1f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, 
half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -456,7 +458,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfwnmsac.nxv2f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -478,7 +480,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv2f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -502,7 +504,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vfwnmsac.nxv4f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -524,7 +526,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv4f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -548,7 +550,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vfwnmsac.nxv8f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -570,7 +572,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv8f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -594,7 +596,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vfwnmsac.nxv16f32.f16( , half, , - i32); + iXLen); -define @intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry 
; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -616,7 +618,7 @@ entry: %0, half %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv16f32.f16( half, , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -640,7 +642,7 @@ entry: half %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vfwnmsac.nxv1f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -662,7 +664,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv1f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -686,7 +688,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vfwnmsac.nxv2f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -708,7 +710,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv2f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -732,7 +734,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vfwnmsac.nxv4f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -754,7 +756,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv4f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -778,7 +780,7 @@ entry: 
float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vfwnmsac.nxv8f64.f32( , float, , - i32); + iXLen); -define @intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +define @intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -800,7 +802,7 @@ entry: %0, float %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vfwnmsac.mask.nxv8f64.f32( float, , , - i32); + iXLen); -define @intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -824,7 +826,7 @@ entry: float %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll deleted file mode 100644 index 2282bd5fbc86..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll +++ /dev/null @@ -1,508 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfwredosum.nxv2f32.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.nxv2f32.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredosum.nxv2f32.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.nxv2f32.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredosum.nxv2f32.nxv4f16( - , - , - , - i64); - -define 
@intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.nxv2f32.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredosum.nxv2f32.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.nxv2f32.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredosum.nxv2f32.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.nxv2f32.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredosum.nxv2f32.nxv32f16( - , - , - , - i64); - -define @intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.nxv2f32.nxv32f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16( - , - , - , - , - i64); - -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; 
CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredosum.nxv1f64.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredosum.nxv1f64.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.nxv1f64.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredosum.nxv1f64.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.nxv1f64.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredosum.nxv1f64.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.nxv1f64.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredosum.nxv1f64.nxv16f32( - , - , - , - i64); - -define @intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.nxv1f64.nxv16f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32( - , - , - , - , - i64); - -define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfwredosum.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll similarity index 88% rename from llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll index 37240159bd90..7eb16dd4b8a8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwredosum.nxv2f32.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.nxv2f32 , , , - i32); + iXLen); -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare 
@llvm.riscv.vfwredosum.nxv2f32.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.nxv2f32 , , , - i32); + iXLen); -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfwredosum.nxv2f32.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.nxv2f32 , , , - i32); + iXLen); -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfwredosum.nxv2f32.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.nxv2f32 , , , - i32); + iXLen); -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfwredosum.nxv2f32.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.nxv2f3 , , , - i32); + iXLen); -define 
@intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfwredosum.nxv2f32.nxv32f16( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfwredosum.nxv1f64.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.nxv1f6 , , , - i32); + iXLen); -define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfwredosum.nxv1f64.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.nxv1f6 , , , - i32); + iXLen); -define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfwredosum.nxv1f64.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64( %0, 
%1, %2, i32 %3) nounwind { +define @intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.nxv1f6 , , , - i32); + iXLen); -define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfwredosum.nxv1f64.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.nxv1f6 , , , - i32); + iXLen); -define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfwredosum.nxv1f64.nxv16f32( , , , - i32); + iXLen); -define @intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.nxv1f , , , - i32); + iXLen); -define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv64.ll deleted file mode 100644 index 52bde877eb7d..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv64.ll +++ /dev/null @@ -1,508 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfwredusum.nxv2f32.nxv1f16( - , - , - , - i64); - -define @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.nxv2f32.nxv1f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredusum.nxv2f32.nxv2f16( - , - , - , - i64); - -define @intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.nxv2f32.nxv2f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredusum.nxv2f32.nxv4f16( - , - , - , - i64); - -define @intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.nxv2f32.nxv4f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredusum.nxv2f32.nxv8f16( - , - , - , - i64); - -define @intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.nxv2f32.nxv8f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret 
-entry: - %a = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredusum.nxv2f32.nxv16f16( - , - , - , - i64); - -define @intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.nxv2f32.nxv16f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.nxv2f32( - , - , - , - , - i64); - -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredusum.nxv2f32.nxv32f16( - , - , - , - i64); - -define @intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.nxv2f32.nxv32f16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16( - , - , - , - , - i64); - -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredusum.nxv1f64.nxv1f32( - , - , - , - i64); - -define @intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.nxv1f64.nxv1f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredusum.nxv1f64.nxv2f32( - , - , - , - i64); - -define @intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.nxv1f64.nxv2f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - 
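; [Editorial sketch, not part of the patch] Each rv32/rv64 pair deleted in
; these hunks survives as a single XLen-parameterized test in the renamed
; file. Assuming the usual <vscale x N x ty> operand types implied by the
; intrinsic name (the patch text here elides them), the merged form of the
; test just above would look like:

declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv2f32(
  <vscale x 1 x double>,
  <vscale x 2 x float>,
  <vscale x 1 x double>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
entry:
  ; One body serves both targets; the RUN lines rewrite iXLen per XLen.
  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv2f32(
    <vscale x 1 x double> %0,
    <vscale x 2 x float> %1,
    <vscale x 1 x double> %2,
    iXLen %3)

  ret <vscale x 1 x double> %a
}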
-declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredusum.nxv1f64.nxv4f32( - , - , - , - i64); - -define @intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.nxv1f64.nxv4f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v10, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredusum.nxv1f64.nxv8f32( - , - , - , - i64); - -define @intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v12, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.nxv1f64.nxv8f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.nxv1f64( - , - , - , - , - i64); - -define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v12, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.nxv1f64( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vfwredusum.nxv1f64.nxv16f32( - , - , - , - i64); - -define @intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v16, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.nxv1f64.nxv16f32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32( - , - , - , - , - i64); - -define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfwredusum.vs v8, v16, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32( - %0, - %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv32.ll 
b/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll similarity index 88% rename from llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll index fe56d0c6bd0d..897cd61fb437 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwredusum.nxv2f32.nxv1f16( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -18,7 +20,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.nxv2f32 , , , - i32); + iXLen); -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vfwredusum.nxv2f32.nxv2f16( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -64,7 +66,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.nxv2f32 , , , - i32); + iXLen); -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vfwredusum.nxv2f32.nxv4f16( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -110,7 +112,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.nxv2f32 , , , - i32); + iXLen); -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vfwredusum.nxv2f32.nxv8f16( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -156,7 +158,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.nxv2f32 , , , - i32); + iXLen); -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vfwredusum.nxv2f32.nxv16f16( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -202,7 +204,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.nxv2f3 , , , - i32); + iXLen); -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vfwredusum.nxv2f32.nxv32f16( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -248,7 +250,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vfwredusum.nxv1f64.nxv1f32( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -294,7 +296,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.nxv1f6 , , , - i32); + iXLen); -define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vfwredusum.nxv1f64.nxv2f32( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -340,7 +342,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.nxv1f6 , , , - i32); + iXLen); -define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vfwredusum.nxv1f64.nxv4f32( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -386,7 +388,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.nxv1f6 , , , - i32); + iXLen); -define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vfwredusum.nxv1f64.nxv8f32( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -432,7 +434,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.nxv1f6 , , , - i32); + iXLen); -define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, 
tu, mu @@ -456,7 +458,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vfwredusum.nxv1f64.nxv16f32( , , , - i32); + iXLen); -define @intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -478,7 +480,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.nxv1f , , , - i32); + iXLen); -define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -502,7 +504,7 @@ entry: %1, %2, %3, - i32 %4) + iXLen %4) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll deleted file mode 100644 index d4b0780f03c5..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll +++ /dev/null @@ -1,830 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=lp64d < %s | FileCheck %s -declare @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16( - , - , - i64); - -define @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16( - , - , - i64); - -define @intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare 
@llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16( - , - , - i64); - -define @intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.vv v10, v8, v9 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16( - , - , - i64); - -define @intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.vv v12, v8, v10 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16( - , - , - i64); - -define @intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.vv v16, v8, v12 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32( - , - , - i64); - -define @intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32( - , - , - , 
- , - i64, - i64); - -define @intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32( - , - , - i64); - -define @intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.vv v10, v8, v9 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.vv v8, v10, v11, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32( - , - , - i64); - -define @intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwsub.vv v12, v8, v10 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwsub.vv v8, v12, v14, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32( - , - , - i64); - -define @intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwsub.vv v16, v8, v12 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32( - , - , - , - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwsub.vv v8, v16, v20, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16( - , - half, - i64); - -define 
@intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.vf v9, v8, fa0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16( - , - half, - i64); - -define @intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.vf v9, v8, fa0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16( - , - half, - i64); - -define @intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.vf v10, v8, fa0 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16( - , - half, - i64); - -define @intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.vf v12, v8, fa0 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) 
nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v12, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16( - , - half, - i64); - -define @intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16( %0, half %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.vf v16, v8, fa0 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16( - %0, - half %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16( - , - , - half, - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v16, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16( - %0, - %1, - half %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32( - , - float, - i64); - -define @intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.vf v9, v8, fa0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32( - , - float, - i64); - -define @intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.vf v10, v8, fa0 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32( - %0, - float %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32( - , - , - float, - , - i64, - i64); - -define @intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v10, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32( - %0, - %1, - float %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32( - , - float, - i64); - -define @intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32( %0, float %1, i64 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwsub.vf v12, v8, fa0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32(
-    <vscale x 4 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x float>,
-  float,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwsub.vf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x float> %1,
-    float %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32(
-  <vscale x 8 x float>,
-  float,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwsub.vf v16, v8, fa0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32(
-    <vscale x 8 x float> %0,
-    float %1,
-    i64 %2)
-
-  ret %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x float>,
-  float,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwsub.vf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    float %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwsub.ll
index e81121f848dd..916abcae0de0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=ilp32d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -17,7 +19,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i32 %2)
+    iXLen %2)
 
   ret %a
 }
@@ -27,10 +29,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ;
CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -42,7 +44,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -50,9 +52,9 @@ entry: declare @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16( , , - i32); + iXLen); -define @intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -63,7 +65,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -73,10 +75,10 @@ declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -88,7 +90,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -96,9 +98,9 @@ entry: declare @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16( , , - i32); + iXLen); -define @intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -109,7 +111,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -119,10 +121,10 @@ declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -134,7 +136,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -142,9 +144,9 @@ entry: declare @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16( , , - i32); + iXLen); -define @intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -155,7 +157,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -165,10 +167,10 @@ declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -180,7 +182,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -188,9 +190,9 @@ entry: declare @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16( , , - i32); + iXLen); -define @intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -201,7 +203,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -211,10 +213,10 @@ declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16 , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -226,7 +228,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -234,9 +236,9 @@ entry: declare @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32( , , - i32); + iXLen); -define @intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -247,7 +249,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -257,10 +259,10 @@ declare @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -272,7 +274,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -280,9 +282,9 @@ entry: declare @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32( , , - i32); + iXLen); -define @intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -293,7 +295,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -303,10 +305,10 @@ declare @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -318,7 +320,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -326,9 +328,9 @@ entry: declare @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32( , , - i32); + iXLen); -define @intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -339,7 +341,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -349,10 +351,10 @@ declare @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32( , , , - i32, - i32); + 
iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -364,7 +366,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -372,9 +374,9 @@ entry: declare @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32( , , - i32); + iXLen); -define @intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -385,7 +387,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -395,10 +397,10 @@ declare @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -410,7 +412,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -418,9 +420,9 @@ entry: declare @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -431,7 +433,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -441,10 +443,10 @@ declare @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -456,7 +458,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -464,9 +466,9 @@ entry: declare @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -477,7 +479,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -487,10 +489,10 @@ declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -502,7 +504,7 @@ entry: %1, half %2, 
%3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -510,9 +512,9 @@ entry: declare @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -523,7 +525,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -533,10 +535,10 @@ declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -548,7 +550,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -556,9 +558,9 @@ entry: declare @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -569,7 +571,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -579,10 +581,10 @@ declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -594,7 +596,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -602,9 +604,9 @@ entry: declare @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16( , half, - i32); + iXLen); -define @intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -615,7 +617,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16( %0, half %1, - i32 %2) + iXLen %2) ret %a } @@ -625,10 +627,10 @@ declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16( , half, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -640,7 +642,7 @@ entry: %1, half %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -648,9 +650,9 @@ entry: declare @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -661,7 +663,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -671,10 +673,10 @@ declare @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -686,7 +688,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -694,9 +696,9 @@ entry: declare @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -707,7 +709,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -717,10 +719,10 @@ declare @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -732,7 +734,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -740,9 +742,9 @@ entry: declare @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -753,7 +755,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -763,10 +765,10 @@ declare @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -778,7 +780,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -786,9 +788,9 @@ entry: declare @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32( , float, - i32); + iXLen); -define @intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +define @intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -799,7 +801,7 @@ entry: %a = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32( %0, float %1, - i32 %2) + iXLen %2) ret %a } @@ -809,10 +811,10 @@ declare @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32( , float, , - i32, - i32); + iXLen, + iXLen); 
-define @intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +define @intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -824,7 +826,7 @@ entry: %1, float %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll deleted file mode 100644 index da2290be93d2..000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll +++ /dev/null @@ -1,1248 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ -; RUN: -target-abi=ilp32d < %s | FileCheck %s -declare @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( - , - , - i32); - -define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16( - , - , - i32); - -define @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16( - , - , - i32); - -define @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.wv 
v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16( - , - , - i32); - -define @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16( - , - , - i32); - -define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl4re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32( - , - , - i32); - -define @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32( - , - , - i32); - -define @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32( - 
, - , - , - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32( - , - , - i32); - -define @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32( - , - , - i32); - -define @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32( - , - , - , - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl4re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwsub.wv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv1f32.f16( - , - half, - i32); - -define @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.nxv1f32.f16( - %0, - half %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.mask.nxv1f32.f16( - , - , - half, - , - i32, - i32); - -define @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v9, fa0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16( - %0, - %1, - half %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vfwsub.w.nxv2f32.f16( - , - half, - i32); - -define @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16: -; CHECK: # 
%bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16(
-    <vscale x 2 x float> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  half,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    half %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16(
-  <vscale x 4 x float>,
-  half,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16(
-    <vscale x 4 x float> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  half,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    half %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16(
-  <vscale x 8 x float>,
-  half,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16(
-    <vscale x 8 x float> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  half,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    half %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16(
-  <vscale x 16 x float>,
-  half,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16(
-    <vscale x 16 x float> %0,
-    half %1,
-    i32 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  half,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    half %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32(
-  <vscale x 1 x double>,
-  float,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32(
-    <vscale x 1 x double> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  float,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v9, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    float %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32(
-  <vscale x 2 x double>,
-  float,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32(
-    <vscale x 2 x double> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  float,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v10, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    float %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32(
-  <vscale x 4 x double>,
-  float,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32(
-    <vscale x 4 x double> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  float,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v12, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    float %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32(
-  <vscale x 8 x double>,
-  float,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32(
-    <vscale x 8 x double> %0,
-    float %1,
-    i32 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  float,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v16, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    float %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wv v8, v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %0,
-    half %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %0,
-    half %1,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %0,
-    half %1,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %0,
-    half %1,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %0,
-    half %1,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %0,
-    float %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %0,
-    float %1,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %0,
-    float %1,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %0,
-    float %1,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfwsub.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
-    <vscale x 1 x float> %1,
-    <vscale x 1 x half> %0,
-    i32 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %1,
-    <vscale x 2 x half> %0,
-    i32 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wv v12, v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16(
-    <vscale x 4 x float> %1,
-    <vscale x 4 x half> %0,
-    i32 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v16, v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16(
-    <vscale x 8 x float> %1,
-    <vscale x 8 x half> %0,
-    i32 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-define <vscale x 1 x double> @intrinsic_vfwsub.w_wv_untie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv1f64_nxv1f64_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %1,
-    <vscale x 1 x float> %0,
-    i32 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-define <vscale x 2 x double> @intrinsic_vfwsub.w_wv_untie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv2f64_nxv2f64_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwsub.wv v12, v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(
-    <vscale x 2 x double> %1,
-    <vscale x 2 x float> %0,
-    i32 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-define <vscale x 4 x double> @intrinsic_vfwsub.w_wv_untie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv4f64_nxv4f64_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfwsub.wv v16, v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32(
-    <vscale x 4 x double> %1,
-    <vscale x 4 x float> %0,
-    i32 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-define <vscale x 8 x double> @intrinsic_vfwsub.w_wv_untie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv8f64_nxv8f64_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfwsub.wv v24, v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32(
-    <vscale x 8 x double> %1,
-    <vscale x 8 x float> %0,
-    i32 %2)
-
-  ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll
similarity index 88%
rename from llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll
index ec0bd527dafe..b5d008c3e1ed 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN:   -target-abi=lp64d < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -41,7 +43,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -49,9 +51,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -71,10 +73,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -86,7 +88,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -94,9 +96,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -106,7 +108,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -116,10 +118,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -131,7 +133,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -139,9 +141,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -151,7 +153,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -161,10 +163,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -176,7 +178,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -184,9 +186,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -196,7 +198,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -206,10 +208,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl4re16.v v24, (a0)
@@ -222,7 +224,7 @@ entry:
     <vscale x 16 x float> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -230,9 +232,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -242,7 +244,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -252,10 +254,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -267,7 +269,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -275,9 +277,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(
   <vscale x 2 x double>,
   <vscale x 2 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -287,7 +289,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
     <vscale x 2 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -297,10 +299,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32(
   <vscale x 2 x double>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -312,7 +314,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -320,9 +322,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32(
   <vscale x 4 x double>,
   <vscale x 4 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -332,7 +334,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
     <vscale x 4 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -342,10 +344,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32(
   <vscale x 4 x double>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -357,7 +359,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -365,9 +367,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32(
   <vscale x 8 x double>,
   <vscale x 8 x float>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -377,7 +379,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
     <vscale x 8 x float> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -387,10 +389,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32(
   <vscale x 8 x double>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl4re32.v v24, (a0)
@@ -403,7 +405,7 @@ entry:
     <vscale x 8 x double> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -411,9 +413,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16(
   <vscale x 1 x float>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -423,7 +425,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16(
     <vscale x 1 x float> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -433,10 +435,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
   <vscale x 1 x float>,
   half,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -448,7 +450,7 @@ entry:
     <vscale x 1 x float> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -456,9 +458,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16(
   <vscale x 2 x float>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -468,7 +470,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16(
     <vscale x 2 x float> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -478,10 +480,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
   <vscale x 2 x float>,
   half,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -493,7 +495,7 @@ entry:
     <vscale x 2 x float> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -501,9 +503,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16(
   <vscale x 4 x float>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -513,7 +515,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16(
     <vscale x 4 x float> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -523,10 +525,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
   <vscale x 4 x float>,
   half,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -538,7 +540,7 @@ entry:
     <vscale x 4 x float> %1,
     half %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -546,9 +548,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16(
   <vscale x 8 x float>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -558,7 +560,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16(
     <vscale x 8 x float> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -568,10 +570,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
   <vscale x 8 x float>,
   half,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -583,7 +585,7 @@ entry:
     <vscale x 8 x float> %1,
     half %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -591,9 +593,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16(
   <vscale x 16 x float>,
   half,
-  i64);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, i64 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -603,7 +605,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16(
     <vscale x 16 x float> %0,
     half %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -613,10 +615,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
   <vscale x 16 x float>,
   half,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -628,7 +630,7 @@ entry:
     <vscale x 16 x float> %1,
     half %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -636,9 +638,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32(
   <vscale x 1 x double>,
   float,
-  i64);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -648,7 +650,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32(
     <vscale x 1 x double> %0,
     float %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -658,10 +660,10 @@ declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
   <vscale x 1 x double>,
   float,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -673,7 +675,7 @@ entry:
     <vscale x 1 x double> %1,
     float %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -681,9 +683,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32(
   <vscale x 2 x double>,
   float,
-  i64);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -693,7 +695,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32(
     <vscale x 2 x double> %0,
     float %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -703,10 +705,10 @@ declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
   <vscale x 2 x double>,
   float,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -718,7 +720,7 @@ entry:
     <vscale x 2 x double> %1,
     float %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -726,9 +728,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32(
   <vscale x 4 x double>,
   float,
-  i64);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -738,7 +740,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32(
     <vscale x 4 x double> %0,
     float %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -748,10 +750,10 @@ declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
   <vscale x 4 x double>,
   float,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -763,7 +765,7 @@ entry:
     <vscale x 4 x double> %1,
     float %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -771,9 +773,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32(
   <vscale x 8 x double>,
   float,
-  i64);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -783,7 +785,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32(
     <vscale x 8 x double> %0,
     float %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -793,10 +795,10 @@ declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(
   <vscale x 8 x double>,
   float,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -808,12 +810,12 @@ entry:
     <vscale x 8 x double> %1,
     float %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -825,12 +827,12 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
 
-define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -842,12 +844,12 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
 
-define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -859,12 +861,12 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
 
-define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -876,12 +878,12 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
 
-define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -893,12 +895,12 @@ entry:
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
 
-define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -910,12 +912,12 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
 
-define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -927,12 +929,12 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
 
-define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -944,12 +946,12 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
 
-define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -961,12 +963,12 @@ entry:
     <vscale x 8 x double> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -978,12 +980,12 @@ entry:
     <vscale x 1 x float> %0,
     half %1,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
 
-define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -995,12 +997,12 @@ entry:
     <vscale x 2 x float> %0,
     half %1,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
 
-define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -1012,12 +1014,12 @@ entry:
     <vscale x 4 x float> %0,
     half %1,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
 
-define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -1029,12 +1031,12 @@ entry:
     <vscale x 8 x float> %0,
     half %1,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
 
-define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -1046,12 +1048,12 @@ entry:
     <vscale x 16 x float> %0,
     half %1,
     <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
 
-define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -1063,12 +1065,12 @@ entry:
     <vscale x 1 x double> %0,
     float %1,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
 
-define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1080,12 +1082,12 @@ entry:
     <vscale x 2 x double> %0,
     float %1,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
 
-define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1097,12 +1099,12 @@ entry:
     <vscale x 4 x double> %0,
     float %1,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
 
-define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1114,12 +1116,12 @@ entry:
     <vscale x 8 x double> %0,
     float %1,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -1130,12 +1132,12 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
     <vscale x 1 x float> %1,
     <vscale x 1 x half> %0,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
 
-define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -1146,12 +1148,12 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16(
     <vscale x 2 x float> %1,
     <vscale x 2 x half> %0,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
 
-define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -1162,12 +1164,12 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16(
     <vscale x 4 x float> %1,
     <vscale x 4 x half> %0,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
 
-define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -1178,12 +1180,12 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16(
     <vscale x 8 x float> %1,
     <vscale x 8 x half> %0,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
 
-define <vscale x 1 x double> @intrinsic_vfwsub.w_wv_untie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub.w_wv_untie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv1f64_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -1194,12 +1196,12 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32(
     <vscale x 1 x double> %1,
     <vscale x 1 x float> %0,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
 
-define <vscale x 2 x double> @intrinsic_vfwsub.w_wv_untie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub.w_wv_untie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv2f64_nxv2f64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1210,12 +1212,12 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(
     <vscale x 2 x double> %1,
     <vscale x 2 x float> %0,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
 
-define <vscale x 4 x double> @intrinsic_vfwsub.w_wv_untie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub.w_wv_untie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv4f64_nxv4f64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1226,12 +1228,12 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32(
     <vscale x 4 x double> %1,
     <vscale x 4 x float> %0,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
 
-define <vscale x 8 x double> @intrinsic_vfwsub.w_wv_untie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x double> %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub.w_wv_untie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv8f64_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1242,7 +1244,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32(
     <vscale x 8 x double> %1,
     <vscale x 8 x float> %0,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
--
GitLab